diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp --- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp +++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp @@ -768,28 +768,25 @@ const MachineRegisterInfo *MRI) { VSETVLIInfo InstrInfo; - // If the instruction has policy argument, use the argument. - // If there is no policy argument, default to tail agnostic unless the - // destination is tied to a source. Unless the source is undef. In that case - // the user would have some control over the policy values. - bool TailAgnostic = true; - bool MaskAgnostic = true; + bool TailAgnostic, MaskAgnostic; unsigned UseOpIdx; - if (RISCVII::hasVecPolicyOp(TSFlags)) { - const MachineOperand &Op = MI.getOperand(MI.getNumExplicitOperands() - 1); - uint64_t Policy = Op.getImm(); - assert(Policy <= (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC) && - "Invalid Policy Value"); - // Although in some cases, mismatched passthru/maskedoff with policy value - // does not make sense (ex. tied operand is IMPLICIT_DEF with non-TAMA - // policy, or tied operand is not IMPLICIT_DEF with TAMA policy), but users - // have set the policy value explicitly, so compiler would not fix it. - TailAgnostic = Policy & RISCVII::TAIL_AGNOSTIC; - MaskAgnostic = Policy & RISCVII::MASK_AGNOSTIC; - } else if (MI.isRegTiedToUseOperand(0, &UseOpIdx)) { + if (MI.isRegTiedToUseOperand(0, &UseOpIdx)) { + // Start with undisturbed. TailAgnostic = false; MaskAgnostic = false; - // If the tied operand is an IMPLICIT_DEF we can keep TailAgnostic. + + // If there is a policy operand, use it. + if (RISCVII::hasVecPolicyOp(TSFlags)) { + const MachineOperand &Op = MI.getOperand(MI.getNumExplicitOperands() - 1); + uint64_t Policy = Op.getImm(); + assert(Policy <= (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC) && + "Invalid Policy Value"); + TailAgnostic = Policy & RISCVII::TAIL_AGNOSTIC; + MaskAgnostic = Policy & RISCVII::MASK_AGNOSTIC; + } + + // If the tied operand is an IMPLICIT_DEF we can use TailAgnostic and + // MaskAgnostic. const MachineOperand &UseMO = MI.getOperand(UseOpIdx); MachineInstr *UseMI = MRI->getVRegDef(UseMO.getReg()); if (UseMI && UseMI->isImplicitDef()) { @@ -800,6 +797,12 @@ // tied def. if (RISCVII::doesForceTailAgnostic(TSFlags)) TailAgnostic = true; + } else { + // If there is no tied operand, there shouldn't be a policy operand. + assert(!RISCVII::hasVecPolicyOp(TSFlags) && "Unexpected policy operand"); + // No tied operand, so use agnostic policies.
+ TailAgnostic = true; + MaskAgnostic = true; } if (!RISCVII::usesMaskPolicy(TSFlags)) diff --git a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll --- a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll +++ b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll @@ -38,7 +38,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmclr.m v0 ; CHECK-NEXT: li s0, 36 -; CHECK-NEXT: vsetvli zero, s0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, s0, e16, m4, ta, ma ; CHECK-NEXT: vfwadd.vv v8, v8, v8, v0.t ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 @@ -47,7 +47,7 @@ ; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill ; CHECK-NEXT: call func@plt ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vrgather.vv v4, v8, v8, v0.t ; CHECK-NEXT: vsetvli zero, s0, e16, m4, ta, ma ; CHECK-NEXT: csrr a1, vlenb @@ -114,13 +114,13 @@ ; SUBREGLIVENESS-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; SUBREGLIVENESS-NEXT: vmclr.m v0 ; SUBREGLIVENESS-NEXT: li s0, 36 -; SUBREGLIVENESS-NEXT: vsetvli zero, s0, e16, m4, tu, mu +; SUBREGLIVENESS-NEXT: vsetvli zero, s0, e16, m4, ta, ma ; SUBREGLIVENESS-NEXT: vfwadd.vv v8, v8, v8, v0.t ; SUBREGLIVENESS-NEXT: addi a0, sp, 16 ; SUBREGLIVENESS-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill ; SUBREGLIVENESS-NEXT: call func@plt ; SUBREGLIVENESS-NEXT: li a0, 32 -; SUBREGLIVENESS-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; SUBREGLIVENESS-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; SUBREGLIVENESS-NEXT: vrgather.vv v16, v8, v8, v0.t ; SUBREGLIVENESS-NEXT: vsetvli zero, s0, e16, m4, ta, ma ; SUBREGLIVENESS-NEXT: csrr a1, vlenb diff --git a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll @@ -11,13 +11,16 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI0_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI0_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfabs.v v9, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.ceil.nxv1f16( %va, %m, i32 %evl) @@ -29,13 +32,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI1_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI1_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -51,13 +55,16 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI2_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI2_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfabs.v v9, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmflt.vf v0, v9, 
ft0, v0.t ; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.ceil.nxv2f16( %va, %m, i32 %evl) @@ -69,13 +76,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI3_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI3_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -91,13 +99,16 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI4_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI4_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.ceil.nxv4f16( %va, %m, i32 %evl) @@ -109,13 +120,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI5_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI5_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -129,17 +141,20 @@ define @vp_ceil_vv_nxv8f16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv8f16: ; CHECK: # %bb.0: +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: lui a1, %hi(.LCPI6_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI6_0)(a1) -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfabs.v v12, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.ceil.nxv8f16( %va, %m, i32 %evl) @@ -151,13 +166,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI7_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI7_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t ; CHECK-NEXT: ret %head = 
insertelement poison, i1 true, i32 0 @@ -171,17 +187,20 @@ define @vp_ceil_vv_nxv16f16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv16f16: ; CHECK: # %bb.0: +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: lui a1, %hi(.LCPI8_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI8_0)(a1) -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfabs.v v16, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.ceil.nxv16f16( %va, %m, i32 %evl) @@ -193,13 +212,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI9_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI9_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -213,17 +233,20 @@ define @vp_ceil_vv_nxv32f16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv32f16: ; CHECK: # %bb.0: +; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: lui a1, %hi(.LCPI10_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI10_0)(a1) -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfabs.v v24, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.ceil.nxv32f16( %va, %m, i32 %evl) @@ -235,13 +258,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI11_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI11_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft0 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -257,13 +281,16 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI12_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI12_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfabs.v v9, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: 
ret %v = call @llvm.vp.ceil.nxv1f32( %va, %m, i32 %evl) @@ -275,13 +302,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI13_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI13_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -297,13 +325,16 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI14_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI14_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.ceil.nxv2f32( %va, %m, i32 %evl) @@ -315,13 +346,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI15_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI15_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -335,17 +367,20 @@ define @vp_ceil_vv_nxv4f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv4f32: ; CHECK: # %bb.0: +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: lui a1, %hi(.LCPI16_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI16_0)(a1) -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfabs.v v12, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.ceil.nxv4f32( %va, %m, i32 %evl) @@ -357,13 +392,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI17_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI17_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -377,17 +413,20 @@ define @vp_ceil_vv_nxv8f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv8f32: ; CHECK: # %bb.0: +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: lui a1, %hi(.LCPI18_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI18_0)(a1) -; 
CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfabs.v v16, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.ceil.nxv8f32( %va, %m, i32 %evl) @@ -399,13 +438,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI19_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI19_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -419,17 +459,20 @@ define @vp_ceil_vv_nxv16f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv16f32: ; CHECK: # %bb.0: +; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: lui a1, %hi(.LCPI20_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI20_0)(a1) -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfabs.v v24, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.ceil.nxv16f32( %va, %m, i32 %evl) @@ -441,13 +484,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI21_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI21_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft0 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -463,13 +507,16 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI22_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI22_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.ceil.nxv1f64( %va, %m, i32 %evl) @@ -481,13 +528,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI23_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI23_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; 
CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -501,17 +549,20 @@ define @vp_ceil_vv_nxv2f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv2f64: ; CHECK: # %bb.0: +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: lui a1, %hi(.LCPI24_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI24_0)(a1) -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfabs.v v12, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.ceil.nxv2f64( %va, %m, i32 %evl) @@ -523,13 +574,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI25_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI25_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -543,17 +595,20 @@ define @vp_ceil_vv_nxv4f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv4f64: ; CHECK: # %bb.0: +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: lui a1, %hi(.LCPI26_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI26_0)(a1) -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfabs.v v16, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.ceil.nxv4f64( %va, %m, i32 %evl) @@ -565,13 +620,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI27_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI27_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -585,17 +641,20 @@ define @vp_ceil_vv_nxv7f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv7f64: ; CHECK: # %bb.0: +; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: lui a1, %hi(.LCPI28_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI28_0)(a1) -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; 
CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfabs.v v24, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.ceil.nxv7f64( %va, %m, i32 %evl) @@ -607,13 +666,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI29_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI29_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft0 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -627,17 +687,20 @@ define @vp_ceil_vv_nxv8f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv8f64: ; CHECK: # %bb.0: +; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: lui a1, %hi(.LCPI30_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI30_0)(a1) -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfabs.v v24, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.ceil.nxv8f64( %va, %m, i32 %evl) @@ -649,13 +712,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI31_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI31_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft0 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -670,58 +734,70 @@ define @vp_ceil_vv_nxv16f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv16f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 4 +; CHECK-NEXT: sub sp, sp, a1 +; CHECK-NEXT: vmv1r.v v24, v0 +; CHECK-NEXT: addi a1, sp, 16 +; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a4, a1, 3 ; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma ; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: vslidedown.vx v2, v0, a4 +; CHECK-NEXT: vslidedown.vx v25, v0, a4 ; CHECK-NEXT: bltu a0, a3, .LBB32_2 ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a2, a3 ; CHECK-NEXT: .LBB32_2: -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: csrr a3, vlenb -; CHECK-NEXT: slli a3, a3, 3 -; CHECK-NEXT: sub sp, sp, a3 ; CHECK-NEXT: lui a3, %hi(.LCPI32_0) ; 
CHECK-NEXT: fld ft0, %lo(.LCPI32_0)(a3) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vmv1r.v v0, v2 -; CHECK-NEXT: vfabs.v v24, v16, v0.t -; CHECK-NEXT: vmflt.vf v2, v24, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vfabs.v v8, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t ; CHECK-NEXT: fsrmi a2, 3 -; CHECK-NEXT: vmv1r.v v0, v2 -; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t -; CHECK-NEXT: addi a3, sp, 16 -; CHECK-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t ; CHECK-NEXT: fsrm a2 -; CHECK-NEXT: addi a2, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a2) # Unknown-size Folded Reload -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t -; CHECK-NEXT: addi a2, sp, 16 +; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t +; CHECK-NEXT: csrr a2, vlenb +; CHECK-NEXT: slli a2, a2, 3 +; CHECK-NEXT: add a2, sp, a2 +; CHECK-NEXT: addi a2, a2, 16 ; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill ; CHECK-NEXT: bltu a0, a1, .LBB32_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: mv a0, a1 ; CHECK-NEXT: .LBB32_4: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v0, v24 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfabs.v v16, v8, v0.t -; CHECK-NEXT: vmflt.vf v1, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vmflt.vf v24, v16, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: add a0, sp, a0 +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -741,26 +817,28 @@ ; CHECK-NEXT: lui a3, %hi(.LCPI33_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI33_0)(a3) ; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vfabs.v v24, v8 ; CHECK-NEXT: vmflt.vf v0, v24, ft0 ; CHECK-NEXT: fsrmi a2, 3 ; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t ; CHECK-NEXT: fsrm a2 ; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: sub a1, a0, a1 ; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t ; CHECK-NEXT: bltu a0, a1, .LBB33_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: mv a3, a1 ; CHECK-NEXT: .LBB33_4: -; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; CHECK-NEXT: vfabs.v v24, v16 ; CHECK-NEXT: vmflt.vf v0, v24, ft0 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; 
CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll @@ -9,13 +9,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI0_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI0_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.ceil.nxv1f16( %x) @@ -28,13 +29,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI1_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI1_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.ceil.nxv2f16( %x) @@ -47,13 +49,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI2_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI2_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.ceil.nxv4f16( %x) @@ -66,13 +69,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI3_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI3_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.ceil.nxv8f16( %x) @@ -85,13 +89,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI4_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI4_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.ceil.nxv16f16( %x) @@ -104,13 +109,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI5_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI5_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft0 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, 
v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.ceil.nxv32f16( %x) @@ -123,13 +129,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI6_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI6_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.ceil.nxv1f32( %x) @@ -142,13 +149,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI7_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI7_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.ceil.nxv2f32( %x) @@ -161,13 +169,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI8_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI8_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.ceil.nxv4f32( %x) @@ -180,13 +189,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI9_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI9_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.ceil.nxv8f32( %x) @@ -199,13 +209,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI10_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI10_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft0 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.ceil.nxv16f32( %x) @@ -218,13 +229,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI11_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI11_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.ceil.nxv1f64( %x) @@ -237,13 +249,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI12_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI12_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; 
CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.ceil.nxv2f64( %x) @@ -256,13 +269,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI13_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI13_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.ceil.nxv4f64( %x) @@ -275,13 +289,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI14_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI14_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft0 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.ceil.nxv8f64( %x) diff --git a/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll @@ -9,13 +9,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI0_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI0_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 2 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.floor.nxv1f16( %x) @@ -28,13 +29,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI1_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI1_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 2 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.floor.nxv2f16( %x) @@ -47,13 +49,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI2_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI2_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 2 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.floor.nxv4f16( %x) @@ -66,13 +69,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI3_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI3_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, 
e16, m2, ta, ma ; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: fsrmi a0, 2 ; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.floor.nxv8f16( %x) @@ -85,13 +89,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI4_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI4_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: fsrmi a0, 2 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.floor.nxv16f16( %x) @@ -104,13 +109,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI5_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI5_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft0 ; CHECK-NEXT: fsrmi a0, 2 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.floor.nxv32f16( %x) @@ -123,13 +129,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI6_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI6_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 2 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.floor.nxv1f32( %x) @@ -142,13 +149,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI7_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI7_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 2 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.floor.nxv2f32( %x) @@ -161,13 +169,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI8_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI8_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: fsrmi a0, 2 ; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.floor.nxv4f32( %x) @@ -180,13 +189,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI9_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI9_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: fsrmi a0, 2 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: 
vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.floor.nxv8f32( %x) @@ -199,13 +209,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI10_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI10_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft0 ; CHECK-NEXT: fsrmi a0, 2 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.floor.nxv16f32( %x) @@ -218,13 +229,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI11_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI11_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 2 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.floor.nxv1f64( %x) @@ -237,13 +249,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI12_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI12_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: fsrmi a0, 2 ; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.floor.nxv2f64( %x) @@ -256,13 +269,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI13_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI13_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: fsrmi a0, 2 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.floor.nxv4f64( %x) @@ -275,13 +289,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI14_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI14_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft0 ; CHECK-NEXT: fsrmi a0, 2 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.floor.nxv8f64( %x) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fpext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fpext-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fpext-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fpext-vp.ll @@ -7,7 +7,7 @@ define <2 x float> @vfpext_v2f16_v2f32(<2 x half> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_v2f16_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v9, v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -31,9 +31,9 @@ define <2 x double> 
@vfpext_v2f16_v2f64(<2 x half> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_v2f16_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v9, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x double> @llvm.vp.fpext.v2f64.v2f16(<2 x half> %a, <2 x i1> %m, i32 %vl) @@ -57,7 +57,7 @@ define <2 x double> @vfpext_v2f32_v2f64(<2 x float> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_v2f32_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v9, v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -81,7 +81,7 @@ define <15 x double> @vfpext_v15f32_v15f64(<15 x float> %a, <15 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_v15f32_v15f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v16, v8, v0.t ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -105,14 +105,14 @@ ; CHECK-NEXT: .LBB7_2: ; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v24, v8, 16 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: vfwcvt.f.f.v v16, v24, v0.t ; CHECK-NEXT: bltu a0, a1, .LBB7_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: .LBB7_4: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: vfwcvt.f.f.v v24, v8, v0.t ; CHECK-NEXT: vmv8r.v v8, v24 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fptrunc-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fptrunc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fptrunc-vp.ll @@ -7,7 +7,7 @@ define <2 x half> @vfptrunc_v2f16_v2f32(<2 x float> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_v2f16_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v9, v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -31,9 +31,9 @@ define <2 x half> @vfptrunc_v2f16_v2f64(<2 x double> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_v2f16_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.rod.f.f.w v9, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x half> @llvm.vp.fptrunc.v2f16.v2f64(<2 x double> %a, <2 x i1> %m, i32 %vl) @@ -57,7 +57,7 @@ define <2 x float> @vfptrunc_v2f32_v2f64(<2 x double> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_v2f32_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v9, v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -81,7 +81,7 @@ define <15 x float> @vfptrunc_v15f32_v15f64(<15 x double> %a, <15 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_v15f32_v15f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli 
zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vfncvt.f.f.w v16, v8, v0.t
 ; CHECK-NEXT: vmv.v.v v8, v16
 ; CHECK-NEXT: ret
@@ -110,14 +110,14 @@
 ; CHECK-NEXT: # %bb.1:
 ; CHECK-NEXT: mv a1, a2
 ; CHECK-NEXT: .LBB7_2:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT: li a1, 16
 ; CHECK-NEXT: vfncvt.f.f.w v8, v16, v0.t
 ; CHECK-NEXT: bltu a0, a1, .LBB7_4
 ; CHECK-NEXT: # %bb.3:
 ; CHECK-NEXT: li a0, 16
 ; CHECK-NEXT: .LBB7_4:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v24
 ; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp-mask.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp-mask.ll
@@ -7,9 +7,8 @@
 define <2 x i1> @vtrunc_v2i1_v2i16(<2 x i16> %a, <2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vtrunc_v2i1_v2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vand.vi v8, v8, 1, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
 ; CHECK-NEXT: vmsne.vi v0, v8, 0, v0.t
 ; CHECK-NEXT: ret
 %v = call <2 x i1> @llvm.vp.trunc.v2i1.v2i16(<2 x i16> %a, <2 x i1> %m, i32 %vl)
@@ -32,9 +31,8 @@
 define <2 x i1> @vtrunc_v2i1_v2i32(<2 x i32> %a, <2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vtrunc_v2i1_v2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vand.vi v8, v8, 1, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT: vmsne.vi v0, v8, 0, v0.t
 ; CHECK-NEXT: ret
 %v = call <2 x i1> @llvm.vp.trunc.v2i1.v2i32(<2 x i32> %a, <2 x i1> %m, i32 %vl)
@@ -57,9 +55,8 @@
 define <2 x i1> @vtrunc_v2i1_v2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vtrunc_v2i1_v2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vand.vi v8, v8, 1, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT: vmsne.vi v0, v8, 0, v0.t
 ; CHECK-NEXT: ret
 %v = call <2 x i1> @llvm.vp.trunc.v2i1.v2i64(<2 x i64> %a, <2 x i1> %m, i32 %vl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp.ll
@@ -7,7 +7,7 @@
 define <2 x i7> @vtrunc_v2i7_v2i16(<2 x i16> %a, <2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vtrunc_v2i7_v2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
 ; CHECK-NEXT: ret
 %v = call <2 x i7> @llvm.vp.trunc.v2i7.v2i16(<2 x i16> %a, <2 x i1> %m, i32 %vl)
@@ -19,7 +19,7 @@
 define <2 x i8> @vtrunc_v2i8_v2i15(<2 x i15> %a, <2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vtrunc_v2i8_v2i15:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
 ; CHECK-NEXT: ret
 %v = call <2 x i8> @llvm.vp.trunc.v2i8.v2i15(<2 x i15> %a, <2 x i1> %m, i32 %vl)
@@ -31,7 +31,7 @@
 define <2 x i8> @vtrunc_v2i8_v2i16(<2 x i16> %a, <2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vtrunc_v2i8_v2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
 ; CHECK-NEXT: ret
 %v = call <2 x i8> @llvm.vp.trunc.v2i8.v2i16(<2 x i16> %a, <2 x i1> %m, i32 %vl)
@@ -69,14 +69,14 @@
 ; CHECK-NEXT: # %bb.1:
 ; CHECK-NEXT: mv a1, a2
 ; CHECK-NEXT: .LBB4_2:
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT: li a1, 64
 ; CHECK-NEXT: vnsrl.wi v8, v16, 0, v0.t
 ; CHECK-NEXT: bltu a0, a1, .LBB4_4
 ; CHECK-NEXT: # %bb.3:
 ; CHECK-NEXT: li a0, 64
 ; CHECK-NEXT: .LBB4_4:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v24
 ; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
@@ -99,9 +99,9 @@
 define <2 x i8> @vtrunc_v2i8_v2i32(<2 x i32> %a, <2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vtrunc_v2i8_v2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
 ; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
 ; CHECK-NEXT: ret
 %v = call <2 x i8> @llvm.vp.trunc.v2i8.v2i32(<2 x i32> %a, <2 x i1> %m, i32 %vl)
@@ -125,11 +125,11 @@
 define <2 x i8> @vtrunc_v2i8_v2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vtrunc_v2i8_v2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
 ; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
 ; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
 ; CHECK-NEXT: ret
 %v = call <2 x i8> @llvm.vp.trunc.v2i8.v2i64(<2 x i64> %a, <2 x i1> %m, i32 %vl)
@@ -155,7 +155,7 @@
 define <2 x i16> @vtrunc_v2i16_v2i32(<2 x i32> %a, <2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vtrunc_v2i16_v2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
 ; CHECK-NEXT: ret
 %v = call <2 x i16> @llvm.vp.trunc.v2i16.v2i32(<2 x i32> %a, <2 x i1> %m, i32 %vl)
@@ -177,9 +177,9 @@
 define <2 x i16> @vtrunc_v2i16_v2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vtrunc_v2i16_v2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
 ; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
 ; CHECK-NEXT: ret
 %v = call <2 x i16> @llvm.vp.trunc.v2i16.v2i64(<2 x i64> %a, <2 x i1> %m, i32 %vl)
@@ -203,9 +203,9 @@
 define <15 x i16> @vtrunc_v15i16_v15i64(<15 x i64> %a, <15 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vtrunc_v15i16_v15i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vnsrl.wi v16, v8, 0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT: vnsrl.wi v8, v16, 0, v0.t
 ; CHECK-NEXT: ret
 %v = call <15 x i16> @llvm.vp.trunc.v15i16.v15i64(<15 x i64> %a, <15 x i1> %m, i32 %vl)
@@ -217,7 +217,7 @@
 define <2 x i32> @vtrunc_v2i32_v2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vtrunc_v2i32_v2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
 ; CHECK-NEXT: ret
 %v = call <2 x i32> @llvm.vp.trunc.v2i32.v2i64(<2 x i64> %a, <2 x i1> %m, i32 %vl)
@@ -287,7 +287,7 @@
 ; CHECK-NEXT: mv a3, t0
 ; CHECK-NEXT: .LBB16_6:
 ; CHECK-NEXT: vle64.v v8, (a6)
-; CHECK-NEXT: vsetvli zero, a3, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a3, e32, m4, ta, ma
 ; CHECK-NEXT: li a3, 16
 ; CHECK-NEXT: vnsrl.wi v24, v16, 0, v0.t
 ; CHECK-NEXT: csrr a6, vlenb
@@ -299,7 +299,7 @@
 ; CHECK-NEXT: # %bb.7:
 ; CHECK-NEXT: li a5, 16
 ; CHECK-NEXT: .LBB16_8:
-; CHECK-NEXT: vsetvli zero, a5, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a5, e32, m4, ta, ma
 ; CHECK-NEXT: li a5, 64
 ; CHECK-NEXT: vmv1r.v v0, v3
 ; CHECK-NEXT: vnsrl.wi v16, v8, 0, v0.t
@@ -334,7 +334,7 @@
 ; CHECK-NEXT: .LBB16_14:
 ; CHECK-NEXT: vle64.v v8, (a1)
 ; CHECK-NEXT: li a5, 32
-; CHECK-NEXT: vsetvli zero, t0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, t0, e32, m4, ta, ma
 ; CHECK-NEXT: vnsrl.wi v24, v16, 0, v0.t
 ; CHECK-NEXT: csrr t0, vlenb
 ; CHECK-NEXT: slli t0, t0, 3
@@ -346,7 +346,7 @@
 ; CHECK-NEXT: li a6, 16
 ; CHECK-NEXT: .LBB16_16:
 ; CHECK-NEXT: addi t0, a1, 384
-; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v3
 ; CHECK-NEXT: vnsrl.wi v16, v8, 0, v0.t
 ; CHECK-NEXT: csrr a6, vlenb
@@ -371,7 +371,7 @@
 ; CHECK-NEXT: mv a1, t0
 ; CHECK-NEXT: .LBB16_20:
 ; CHECK-NEXT: vle64.v v8, (a6)
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT: vnsrl.wi v16, v24, 0, v0.t
 ; CHECK-NEXT: addi a1, sp, 16
 ; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -379,7 +379,7 @@
 ; CHECK-NEXT: # %bb.21:
 ; CHECK-NEXT: li a4, 16
 ; CHECK-NEXT: .LBB16_22:
-; CHECK-NEXT: vsetvli zero, a4, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a4, e32, m4, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v2
 ; CHECK-NEXT: vnsrl.wi v24, v8, 0, v0.t
 ; CHECK-NEXT: bltu a7, a5, .LBB16_24
@@ -433,7 +433,7 @@
 ; CHECK-NEXT: addi a1, sp, 16
 ; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload
 ; CHECK-NEXT: vslideup.vi v24, v8, 16
-; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
 ; CHECK-NEXT: csrr a1, vlenb
 ; CHECK-NEXT: li a2, 24
 ; CHECK-NEXT: mul a1, a1, a2
@@ -450,7 +450,7 @@
 ; CHECK-NEXT: # %bb.27:
 ; CHECK-NEXT: li a7, 16
 ; CHECK-NEXT: .LBB16_28:
-; CHECK-NEXT: vsetvli zero, a7, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a7, e32, m4, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v1
 ; CHECK-NEXT: csrr a1, vlenb
 ; CHECK-NEXT: slli a1, a1, 5
@@ -515,14 +515,14 @@
 ; CHECK-NEXT: # %bb.1:
 ; CHECK-NEXT: mv a1, a2
 ; CHECK-NEXT: .LBB17_2:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT: li a1, 16
 ; CHECK-NEXT: vnsrl.wi v8, v16, 0, v0.t
 ; CHECK-NEXT: bltu a0, a1, .LBB17_4
 ; CHECK-NEXT: # %bb.3:
 ; CHECK-NEXT: li a0, 16
 ; CHECK-NEXT: .LBB17_4:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v24
 ; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll
@@ -511,7 +511,7 @@
 ; RV32-NEXT: vmv.v.i v8, 0
 ; RV32-NEXT: vslide1up.vx v9, v8, a1
 ; RV32-NEXT: vslide1up.vx v10, v9, a0
-; RV32-NEXT: vsetivli zero, 1, e64, m1, tu, ma
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT: vslideup.vi v8, v10, 0
 ; RV32-NEXT: ret
 ;
@@ -550,7 +550,7 @@
 ; RV32-NEXT: vmv.v.i v8, 0
 ; RV32-NEXT: vslide1up.vx v9, v8, a1
 ; RV32-NEXT: vslide1up.vx v10, v9, a0
-; RV32-NEXT: vsetivli zero, 1, e64, m1, tu, ma
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT: vslideup.vi v8, v10, 0
 ; RV32-NEXT: ret
 ;
@@ -589,7 +589,7 @@
 ; RV32-NEXT: vmv.v.i v8, 0
 ; RV32-NEXT: vslide1up.vx v9, v8, a1
 ; RV32-NEXT: vslide1up.vx v10, v9, a0
-; RV32-NEXT: vsetivli zero, 1, e64, m1, tu, ma
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT: vslideup.vi v8, v10, 0
 ; RV32-NEXT: ret
 ;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
@@ -11,13 +11,16 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a1, %hi(.LCPI0_0)
 ; CHECK-NEXT: flh ft0, %lo(.LCPI0_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call <2 x half> @llvm.vp.ceil.v2f16(<2 x half> %va, <2 x i1> %m, i32 %evl)
@@ -31,13 +34,16 @@
 ; CHECK-NEXT: flh ft0, %lo(.LCPI1_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT: vmset.m v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement <2 x i1> poison, i1 true, i32 0
@@ -53,13 +59,16 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a1, %hi(.LCPI2_0)
 ; CHECK-NEXT: flh ft0, %lo(.LCPI2_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call <4 x half> @llvm.vp.ceil.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
@@ -73,13 +82,16 @@
 ; CHECK-NEXT: flh ft0, %lo(.LCPI3_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT: vmset.m v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement <4 x i1> poison, i1 true, i32 0
@@ -95,13 +107,16 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a1, %hi(.LCPI4_0)
 ; CHECK-NEXT: flh ft0, %lo(.LCPI4_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call <8 x half> @llvm.vp.ceil.v8f16(<8 x half> %va, <8 x i1> %m, i32 %evl)
@@ -115,13 +130,16 @@
 ; CHECK-NEXT: flh ft0, %lo(.LCPI5_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT: vmset.m v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement <8 x i1> poison, i1 true, i32 0
@@ -135,17 +153,20 @@
 define <16 x half> @vp_ceil_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_v16f16:
 ; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v10, v0
 ; CHECK-NEXT: lui a1, %hi(.LCPI6_0)
 ; CHECK-NEXT: flh ft0, %lo(.LCPI6_0)(a1)
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
 ; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call <16 x half> @llvm.vp.ceil.v16f16(<16 x half> %va, <16 x i1> %m, i32 %evl)
@@ -159,15 +180,18 @@
 ; CHECK-NEXT: flh ft0, %lo(.LCPI7_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT: vmset.m v10
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
 ; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement <16 x i1> poison, i1 true, i32 0
@@ -183,13 +207,16 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a1, %hi(.LCPI8_0)
 ; CHECK-NEXT: flw ft0, %lo(.LCPI8_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call <2 x float> @llvm.vp.ceil.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
@@ -203,13 +230,16 @@
 ; CHECK-NEXT: flw ft0, %lo(.LCPI9_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT: vmset.m v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement <2 x i1> poison, i1 true, i32 0
@@ -225,13 +255,16 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a1, %hi(.LCPI10_0)
 ; CHECK-NEXT: flw ft0, %lo(.LCPI10_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call <4 x float> @llvm.vp.ceil.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
@@ -245,13 +278,16 @@
 ; CHECK-NEXT: flw ft0, %lo(.LCPI11_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT: vmset.m v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement <4 x i1> poison, i1 true, i32 0
@@ -265,17 +301,20 @@
 define <8 x float> @vp_ceil_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_v8f32:
 ; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v10, v0
 ; CHECK-NEXT: lui a1, %hi(.LCPI12_0)
 ; CHECK-NEXT: flw ft0, %lo(.LCPI12_0)(a1)
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call <8 x float> @llvm.vp.ceil.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
@@ -289,15 +328,18 @@
 ; CHECK-NEXT: flw ft0, %lo(.LCPI13_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT: vmset.m v10
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement <8 x i1> poison, i1 true, i32 0
@@ -311,17 +353,20 @@
 define <16 x float> @vp_ceil_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_v16f32:
 ; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v12, v0
 ; CHECK-NEXT: lui a1, %hi(.LCPI14_0)
 ; CHECK-NEXT: flw ft0, %lo(.LCPI14_0)(a1)
-; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call <16 x float> @llvm.vp.ceil.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
@@ -335,15 +380,18 @@
 ; CHECK-NEXT: flw ft0, %lo(.LCPI15_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT: vmset.m v12
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement <16 x i1> poison, i1 true, i32 0
@@ -359,13 +407,16 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a1, %hi(.LCPI16_0)
 ; CHECK-NEXT: fld ft0, %lo(.LCPI16_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call <2 x double> @llvm.vp.ceil.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
@@ -379,13 +430,16 @@
 ; CHECK-NEXT: fld ft0, %lo(.LCPI17_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT: vmset.m v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement <2 x i1> poison, i1 true, i32 0
@@ -399,17 +453,20 @@
 define <4 x double> @vp_ceil_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_v4f64:
 ; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v10, v0
 ; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
 ; CHECK-NEXT: fld ft0, %lo(.LCPI18_0)(a1)
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call <4 x double> @llvm.vp.ceil.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
@@ -423,15 +480,18 @@
 ; CHECK-NEXT: fld ft0, %lo(.LCPI19_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT: vmset.m v10
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement <4 x i1> poison, i1 true, i32 0
@@ -445,17 +505,20 @@
 define <8 x double> @vp_ceil_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_v8f64:
 ; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v12, v0
 ; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
 ; CHECK-NEXT: fld ft0, %lo(.LCPI20_0)(a1)
-; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call <8 x double> @llvm.vp.ceil.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
@@ -469,15 +532,18 @@
 ; CHECK-NEXT: fld ft0, %lo(.LCPI21_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT: vmset.m v12
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement <8 x i1> poison, i1 true, i32 0
@@ -491,17 +557,20 @@
 define <15 x double> @vp_ceil_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_v15f64:
 ; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v16, v0
 ; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
 ; CHECK-NEXT: fld ft0, %lo(.LCPI22_0)(a1)
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v16
 ; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call <15 x double> @llvm.vp.ceil.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
@@ -515,15 +584,18 @@
 ; CHECK-NEXT: fld ft0, %lo(.LCPI23_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT: vmset.m v16
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v16
 ; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v16
 ; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement <15 x i1> poison, i1 true, i32 0
@@ -537,17 +609,20 @@
 define <16 x double> @vp_ceil_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_v16f64:
 ; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v16, v0
 ; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
 ; CHECK-NEXT: fld ft0, %lo(.LCPI24_0)(a1)
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v16
 ; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call <16 x double> @llvm.vp.ceil.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
@@ -561,15 +636,18 @@
 ; CHECK-NEXT: fld ft0, %lo(.LCPI25_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT: vmset.m v16
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v16
 ; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v16
 ; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement <16 x i1> poison, i1 true, i32 0
@@ -583,57 +661,69 @@
 define <32 x double> @vp_ceil_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_v32f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
 ; CHECK-NEXT: li a1, 0
 ; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
 ; CHECK-NEXT: addi a2, a0, -16
-; CHECK-NEXT: vslidedown.vi v2, v0, 2
+; CHECK-NEXT: vslidedown.vi v25, v0, 2
 ; CHECK-NEXT: bltu a0, a2, .LBB26_2
 ; CHECK-NEXT: # %bb.1:
 ; CHECK-NEXT: mv a1, a2
 ; CHECK-NEXT: .LBB26_2:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: sub sp, sp, a2
 ; CHECK-NEXT: lui a2, %hi(.LCPI26_0)
 ; CHECK-NEXT: fld ft0, %lo(.LCPI26_0)(a2)
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v2
-; CHECK-NEXT: vfabs.v v24, v16, v0.t
-; CHECK-NEXT: vmflt.vf v2, v24, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vfabs.v v8, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT: fsrmi a1, 3
-; CHECK-NEXT: vmv1r.v v0, v2
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
 ; CHECK-NEXT: fsrm a1
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: li a1, 16
-; CHECK-NEXT: addi a2, sp, 16
-; CHECK-NEXT: vl8re8.v v24, (a2) # Unknown-size Folded Reload
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
 ; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
 ; CHECK-NEXT: bltu a0, a1, .LBB26_4
 ; CHECK-NEXT: # %bb.3:
 ; CHECK-NEXT: li a0, 16
 ; CHECK-NEXT: .LBB26_4:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT: vfabs.v v16, v8, v0.t
-; CHECK-NEXT: vmflt.vf v1, v16, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vmflt.vf v24, v16, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
 ; CHECK-NEXT: add sp, sp, a0
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -644,55 +734,71 @@
 define <32 x double> @vp_ceil_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_v32f64_unmasked:
 ; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
 ; CHECK-NEXT: li a1, 0
 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT: addi a2, a0, -16
-; CHECK-NEXT: vmset.m v1
+; CHECK-NEXT: vmset.m v24
 ; CHECK-NEXT: bltu a0, a2, .LBB27_2
 ; CHECK-NEXT: # %bb.1:
 ; CHECK-NEXT: mv a1, a2
 ; CHECK-NEXT: .LBB27_2:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: sub sp, sp, a2
 ; CHECK-NEXT: lui a2, %hi(.LCPI27_0)
 ; CHECK-NEXT: fld ft0, %lo(.LCPI27_0)(a2)
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v1
-; CHECK-NEXT: vfabs.v v24, v16, v0.t
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmflt.vf v2, v24, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vfabs.v v8, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vmv1r.v v25, v24
+; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT: fsrmi a1, 3
-; CHECK-NEXT: vmv1r.v v0, v2
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
 ; CHECK-NEXT: fsrm a1
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: li a1, 16
-; CHECK-NEXT: addi a2, sp, 16
-; CHECK-NEXT: vl8re8.v v24, (a2) # Unknown-size Folded Reload
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t
 ; CHECK-NEXT: csrr a2, vlenb
 ; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: add sp, sp, a2
-; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
 ; CHECK-NEXT: bltu a0, a1, .LBB27_4
 ; CHECK-NEXT: # %bb.3:
 ; CHECK-NEXT: li a0, 16
 ; CHECK-NEXT: .LBB27_4:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v1
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vmflt.vf v1, v24, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vmflt.vf v24, v16, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vmv1r.v v0, v1
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %head = insertelement <32 x i1> poison, i1 true, i32 0
 %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
@@ -11,13 +11,16 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a1, %hi(.LCPI0_0)
 ; CHECK-NEXT: flh ft0, %lo(.LCPI0_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call <2 x half> @llvm.vp.floor.v2f16(<2 x half> %va, <2 x i1> %m, i32 %evl)
@@ -31,13 +34,16 @@
 ; CHECK-NEXT: flh ft0, %lo(.LCPI1_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT: vmset.m v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement <2 x i1> poison, i1 true, i32 0
@@ -53,13 +59,16 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a1, %hi(.LCPI2_0)
 ; CHECK-NEXT: flh ft0, %lo(.LCPI2_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call <4 x half> @llvm.vp.floor.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
@@ -73,13 +82,16 @@
 ; CHECK-NEXT: flh ft0, %lo(.LCPI3_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT: vmset.m v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement <4 x i1> poison, i1 true, i32 0
@@ -95,13 +107,16 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a1, %hi(.LCPI4_0)
 ; CHECK-NEXT: flh ft0, %lo(.LCPI4_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call <8 x half> @llvm.vp.floor.v8f16(<8 x half> %va, <8 x i1> %m, i32 %evl)
@@ -115,13 +130,16 @@
 ; CHECK-NEXT: flh ft0, %lo(.LCPI5_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT: vmset.m v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement <8 x i1> poison, i1 true, i32 0
@@ -135,17 +153,20 @@
 define <16 x half> @vp_floor_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_v16f16:
 ; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v10, v0
 ; CHECK-NEXT: lui a1, %hi(.LCPI6_0)
 ; CHECK-NEXT: flh ft0, %lo(.LCPI6_0)(a1)
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
 ; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call <16 x half> @llvm.vp.floor.v16f16(<16 x half> %va, <16 x i1> %m, i32 %evl)
@@ -159,15 +180,18 @@
 ; CHECK-NEXT: flh ft0, %lo(.LCPI7_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT: vmset.m v10
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
 ; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement <16 x i1> poison, i1 true, i32 0
@@ -183,13 +207,16 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a1, %hi(.LCPI8_0)
 ; CHECK-NEXT: flw ft0, %lo(.LCPI8_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call <2 x float> @llvm.vp.floor.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
@@ -203,13 +230,16 @@
 ; CHECK-NEXT: flw ft0, %lo(.LCPI9_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT: vmset.m v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement <2 x i1> poison, i1 true, i32 0
@@ -225,13 +255,16 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a1, %hi(.LCPI10_0)
 ; CHECK-NEXT: flw ft0, %lo(.LCPI10_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call <4 x float> @llvm.vp.floor.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
@@ -245,13 +278,16 @@
 ; CHECK-NEXT: flw ft0, %lo(.LCPI11_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT: vmset.m v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement <4 x i1> poison, i1 true, i32 0
@@ -265,17 +301,20 @@
 define <8 x float> @vp_floor_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_v8f32:
 ; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v10, v0
 ; CHECK-NEXT: lui a1, %hi(.LCPI12_0)
 ; CHECK-NEXT: flw ft0, %lo(.LCPI12_0)(a1)
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call <8 x float> @llvm.vp.floor.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
@@ -289,15 +328,18 @@
 ; CHECK-NEXT: flw ft0, %lo(.LCPI13_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT: vmset.m v10
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement <8 x i1> poison, i1 true, i32 0
@@ -311,17 +353,20 @@
 define <16 x float> @vp_floor_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_v16f32:
 ; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v12, v0
 ; CHECK-NEXT: lui a1, %hi(.LCPI14_0)
 ; CHECK-NEXT: flw ft0, %lo(.LCPI14_0)(a1)
-; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call <16 x float> @llvm.vp.floor.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
@@ -335,15 +380,18 @@
 ; CHECK-NEXT: flw ft0, %lo(.LCPI15_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT: vmset.m v12
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement <16 x i1> poison, i1 true, i32 0
@@ -359,13 +407,16 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a1, %hi(.LCPI16_0)
 ; CHECK-NEXT: fld ft0, %lo(.LCPI16_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call <2 x double> @llvm.vp.floor.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
@@ -379,13 +430,16 @@
 ; CHECK-NEXT: fld ft0, %lo(.LCPI17_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT: vmset.m v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement <2 x i1> poison, i1 true, i32 0
@@ -399,17 +453,20 @@
 define <4 x double> @vp_floor_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_v4f64:
 ; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v10, v0
 ; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
 ; CHECK-NEXT: fld ft0, %lo(.LCPI18_0)(a1)
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call <4 x double> @llvm.vp.floor.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
@@ -423,15 +480,18 @@
 ; CHECK-NEXT: fld ft0, %lo(.LCPI19_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT: vmset.m v10
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement <4 x i1> poison, i1 true, i32 0
@@ -445,17 +505,20 @@
 define <8 x double> @vp_floor_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_v8f64:
 ; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v12, v0
 ; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
 ; CHECK-NEXT: fld ft0, %lo(.LCPI20_0)(a1)
-; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call <8 x double> @llvm.vp.floor.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
@@ -469,15 +532,18 @@
 ; CHECK-NEXT: fld ft0, %lo(.LCPI21_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT: vmset.m v12
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement <8 x i1> poison, i1 true, i32 0
@@ -491,17 +557,20 @@
 define <15 x double> @vp_floor_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_v15f64:
 ; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v16, v0
 ; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
 ; CHECK-NEXT: fld ft0, %lo(.LCPI22_0)(a1)
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v16
 ; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call <15 x double> @llvm.vp.floor.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
@@ -515,15 +584,18 @@
 ; CHECK-NEXT: fld ft0, %lo(.LCPI23_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT: vmset.m v16
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v16
 ; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v16
 ; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement <15 x i1> poison, i1 true, i32 0
@@ -537,17 +609,20 @@
 define <16 x double> @vp_floor_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_v16f64:
 ; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v16, v0
 ; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
 ; CHECK-NEXT: fld ft0, %lo(.LCPI24_0)(a1)
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v16
 ; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call <16 x double> @llvm.vp.floor.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
@@ -561,15 +636,18 @@
 ; CHECK-NEXT: fld ft0, %lo(.LCPI25_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT: vmset.m v16
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v16
 ; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v16
 ; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement <16 x i1> poison, i1 true, i32 0
@@ -583,57 +661,69 @@
 define <32 x double> @vp_floor_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_v32f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
 ; CHECK-NEXT: li a1, 0
 ; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
 ; CHECK-NEXT: addi a2, a0, -16
-; CHECK-NEXT: vslidedown.vi v2, v0, 2
+; CHECK-NEXT: vslidedown.vi v25, v0, 2
 ; CHECK-NEXT: bltu a0, a2, .LBB26_2
 ; CHECK-NEXT: # %bb.1:
 ; CHECK-NEXT: mv a1, a2
 ; CHECK-NEXT: .LBB26_2:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: sub sp, sp, a2
 ; CHECK-NEXT: lui a2, %hi(.LCPI26_0)
 ; CHECK-NEXT: fld ft0, %lo(.LCPI26_0)(a2)
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v2
-; CHECK-NEXT: vfabs.v v24, v16, v0.t
-; CHECK-NEXT: vmflt.vf v2, v24, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vfabs.v v8, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT: fsrmi a1, 2
-; CHECK-NEXT: vmv1r.v v0, v2
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
 ; CHECK-NEXT: fsrm a1
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: li a1, 16
-; CHECK-NEXT: addi a2, sp, 16
-; CHECK-NEXT: vl8re8.v v24, (a2) # Unknown-size Folded Reload
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
 ; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
 ; CHECK-NEXT: bltu a0, a1, .LBB26_4
 ; CHECK-NEXT: # %bb.3:
 ; CHECK-NEXT: li a0, 16
 ; CHECK-NEXT: .LBB26_4:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT: vfabs.v v16, v8, v0.t
-; CHECK-NEXT: vmflt.vf v1, v16, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vmflt.vf v24, v16, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
 ; CHECK-NEXT: add sp, sp, a0
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -644,55 +734,71 @@
 define <32 x double> @vp_floor_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_v32f64_unmasked:
 ; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
 ; CHECK-NEXT: li a1, 0
 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT: addi a2, a0, -16
-; CHECK-NEXT: vmset.m v1
+; CHECK-NEXT: vmset.m v24
 ; CHECK-NEXT: bltu a0, a2, .LBB27_2
 ; CHECK-NEXT: # %bb.1:
 ; CHECK-NEXT: mv a1, a2
 ; CHECK-NEXT: .LBB27_2:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: sub sp, sp, a2
 ; CHECK-NEXT: lui a2, %hi(.LCPI27_0)
 ; CHECK-NEXT: fld ft0, %lo(.LCPI27_0)(a2)
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v1
-; CHECK-NEXT: vfabs.v v24, v16, v0.t
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmflt.vf v2, v24, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vfabs.v v8, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vmv1r.v v25, v24
+; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT: fsrmi a1, 2
-; CHECK-NEXT: vmv1r.v v0, v2
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
 ; CHECK-NEXT: fsrm a1
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: li a1, 16
-; CHECK-NEXT: addi a2, sp, 16
-; CHECK-NEXT: vl8re8.v v24, (a2) # Unknown-size Folded Reload
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t
 ; CHECK-NEXT: csrr a2, vlenb
 ; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: add sp, sp, a2
-; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
 ; CHECK-NEXT: bltu a0, a1, .LBB27_4
 ; CHECK-NEXT: # %bb.3:
 ; CHECK-NEXT: li a0, 16
 ; CHECK-NEXT: .LBB27_4:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v1
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vmflt.vf v1, v24, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vmflt.vf v24, v16, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vmv1r.v v0, v1
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %head = insertelement <32 x i1> poison, i1 true, i32 0
 %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll
@@ -202,7 +202,7 @@
 ; RV32-FP-NEXT: vmv.v.i v8, 0
 ; RV32-FP-NEXT: vslide1up.vx v9, v8, a1
 ; RV32-FP-NEXT: vslide1up.vx v10, v9, a0
-; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, tu, ma
+; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-FP-NEXT: vslideup.vi v8, v10, 0
 ; RV32-FP-NEXT: ret
 ;
@@ -222,7 +222,7 @@
 ; RV32-FP-NEXT: vmv.v.i v8, 0
 ; RV32-FP-NEXT: vslide1up.vx v9, v8, a1
 ; RV32-FP-NEXT: vslide1up.vx v10, v9, a0
-; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, tu, ma
+; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-FP-NEXT: vslideup.vi v8, v10, 0
 ; RV32-FP-NEXT: ret
 ;
@@ -242,7 +242,7 @@
 ; RV32-FP-NEXT: vmv.v.i v8, 0
 ; RV32-FP-NEXT: vslide1up.vx v9, v8, a1
 ; RV32-FP-NEXT: vslide1up.vx v10, v9, a0
-; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, tu, ma
+; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-FP-NEXT: vslideup.vi v8, v10, 0
 ; RV32-FP-NEXT: ret
 ;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
@@ -249,7 +249,7 @@
 define <4 x half> @slideup_v4f16(<4 x half> %x) {
 ; CHECK-LABEL: slideup_v4f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, ma
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT: vslideup.vi v9, v8, 1
 ; CHECK-NEXT: vmv1r.v v8, v9
 ; CHECK-NEXT: ret
@@ -260,9 +260,9 @@
 define <8 x float> @slideup_v8f32(<8 x float> %x) {
 ; CHECK-LABEL: slideup_v8f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, tu, ma
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT: vslideup.vi v10, v8, 3
-; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: vmv.v.v v8, v10
 ; CHECK-NEXT: ret
 %s = shufflevector <8 x float> %x, <8 x float> poison, <8 x i32>
 ret <8 x float> %s
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
@@ -1966,7 +1966,7 @@
 define void @trunc_v8f16(<8 x half>* %x) {
 ; CHECK-LABEL: trunc_v8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT: vle16.v v8, (a0)
 ; CHECK-NEXT: lui a1, %hi(.LCPI91_0)
 ; CHECK-NEXT: flh ft0, %lo(.LCPI91_0)(a1)
@@ -1974,6 +1974,7 @@
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0
 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: vse16.v v8, (a0)
 ; CHECK-NEXT: ret
@@ -1987,7 +1988,7 @@
 define void @trunc_v4f32(<4 x float>* %x) {
 ; CHECK-LABEL: trunc_v4f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT: vle32.v v8, (a0)
 ; CHECK-NEXT: lui a1, %hi(.LCPI92_0)
 ; CHECK-NEXT: flw ft0, %lo(.LCPI92_0)(a1)
@@ -1995,6 +1996,7 @@
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0
 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: vse32.v v8, (a0)
 ; CHECK-NEXT: ret
@@ -2008,7 +2010,7 @@
 define void @trunc_v2f64(<2 x double>* %x) {
 ; CHECK-LABEL: trunc_v2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
 ; CHECK-NEXT: vle64.v v8, (a0)
 ; CHECK-NEXT: lui a1, %hi(.LCPI93_0)
 ; CHECK-NEXT: fld ft0, %lo(.LCPI93_0)(a1)
@@ -2016,6 +2018,7 @@
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0
 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: vse64.v v8, (a0)
 ; CHECK-NEXT: ret
@@ -2029,7 +2032,7 @@
 define void @ceil_v8f16(<8 x half>* %x) {
 ; CHECK-LABEL: ceil_v8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT: vle16.v v8, (a0)
 ; CHECK-NEXT: lui a1, %hi(.LCPI94_0)
 ; CHECK-NEXT: flh ft0, %lo(.LCPI94_0)(a1)
@@ -2039,6 +2042,7 @@
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a1
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: vse16.v v8, (a0)
 ; CHECK-NEXT: ret
@@ -2052,7 +2056,7 @@
 define void @ceil_v4f32(<4 x float>* %x) {
 ; CHECK-LABEL: ceil_v4f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT: vle32.v v8, (a0)
 ; CHECK-NEXT: lui a1, %hi(.LCPI95_0)
 ; CHECK-NEXT: flw ft0, %lo(.LCPI95_0)(a1)
@@ -2062,6 +2066,7 @@
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a1
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: vse32.v v8, (a0)
 ; CHECK-NEXT: ret
@@ -2075,7 +2080,7 @@
 define void @ceil_v2f64(<2 x double>* %x) {
 ; CHECK-LABEL: ceil_v2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
 ; CHECK-NEXT: vle64.v v8, (a0)
 ; CHECK-NEXT: lui a1, %hi(.LCPI96_0)
 ; CHECK-NEXT: fld ft0, %lo(.LCPI96_0)(a1)
@@ -2085,6 +2090,7 @@
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a1
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: vse64.v v8, (a0)
 ; CHECK-NEXT: ret
@@ -2098,7 +2104,7 @@
 define void @floor_v8f16(<8 x half>* %x) {
 ; CHECK-LABEL: floor_v8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT: vle16.v v8, (a0)
 ; CHECK-NEXT: lui a1, %hi(.LCPI97_0)
 ; CHECK-NEXT: flh ft0, %lo(.LCPI97_0)(a1)
@@ -2108,6 +2114,7 @@
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a1
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: vse16.v v8, (a0)
 ; CHECK-NEXT: ret
@@ -2121,7 +2128,7 @@
 define void @floor_v4f32(<4 x float>* %x) {
 ; CHECK-LABEL: floor_v4f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT: vle32.v v8, (a0)
 ; CHECK-NEXT: lui a1, %hi(.LCPI98_0)
 ; CHECK-NEXT: flw ft0, %lo(.LCPI98_0)(a1)
@@ -2131,6 +2138,7 @@
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a1
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: vse32.v v8, (a0)
 ; CHECK-NEXT: ret
@@ -2144,7 +2152,7 @@
 define void @floor_v2f64(<2 x double>* %x) {
 ;
CHECK-LABEL: floor_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: lui a1, %hi(.LCPI99_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI99_0)(a1) @@ -2154,6 +2162,7 @@ ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a1 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: vse64.v v8, (a0) ; CHECK-NEXT: ret @@ -2167,7 +2176,7 @@ define void @round_v8f16(<8 x half>* %x) { ; CHECK-LABEL: round_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: lui a1, %hi(.LCPI100_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI100_0)(a1) @@ -2177,6 +2186,7 @@ ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a1 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret @@ -2190,7 +2200,7 @@ define void @round_v4f32(<4 x float>* %x) { ; CHECK-LABEL: round_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: lui a1, %hi(.LCPI101_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI101_0)(a1) @@ -2200,6 +2210,7 @@ ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a1 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret @@ -2213,7 +2224,7 @@ define void @round_v2f64(<2 x double>* %x) { ; CHECK-LABEL: round_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: lui a1, %hi(.LCPI102_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI102_0)(a1) @@ -2223,6 +2234,7 @@ ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a1 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: vse64.v v8, (a0) ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp-mask.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp-mask.ll @@ -9,9 +9,8 @@ define <4 x i1> @vfptosi_v4i1_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i1_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 0, v0.t ; CHECK-NEXT: ret %v = call <4 x i1> @llvm.vp.fptosi.v4i1.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl) @@ -34,9 +33,8 @@ define <4 x i1> @vfptosi_v4i1_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i1_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 0, v0.t ; CHECK-NEXT: ret %v = call <4 x i1> @llvm.vp.fptosi.v4i1.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl) @@ -59,9 +57,8 @@ define <4 x i1> 
@vfptosi_v4i1_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i1_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v10, 0, v0.t ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll @@ -9,7 +9,7 @@ define <4 x i7> @vfptosi_v4i7_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i7_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -22,7 +22,7 @@ define <4 x i8> @vfptosi_v4i8_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i8_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -46,7 +46,7 @@ define <4 x i16> @vfptosi_v4i16_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i16_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8, v0.t ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.vp.fptosi.v4i16.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl) @@ -68,7 +68,7 @@ define <4 x i32> @vfptosi_v4i32_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i32_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -92,9 +92,9 @@ define <4 x i64> @vfptosi_v4i64_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i64_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v10, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v10, v0.t ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.vp.fptosi.v4i64.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl) @@ -118,9 +118,9 @@ define <4 x i8> @vfptosi_v4i8_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i8_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v9, 0, v0.t ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.fptosi.v4i8.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl) @@ -144,7 +144,7 @@ define <4 x i16> @vfptosi_v4i16_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i16_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ 
-168,7 +168,7 @@ define <4 x i32> @vfptosi_v4i32_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i32_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8, v0.t ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.vp.fptosi.v4i32.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl) @@ -190,7 +190,7 @@ define <4 x i64> @vfptosi_v4i64_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i64_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.rtz.x.f.v v10, v8, v0.t ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -214,11 +214,11 @@ define <4 x i8> @vfptosi_v4i8_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i8_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.fptosi.v4i8.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl) @@ -244,9 +244,9 @@ define <4 x i16> @vfptosi_v4i16_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i16_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.vp.fptosi.v4i16.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl) @@ -270,7 +270,7 @@ define <4 x i32> @vfptosi_v4i32_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i32_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8, v0.t ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -294,7 +294,7 @@ define <4 x i64> @vfptosi_v4i64_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i64_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8, v0.t ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.vp.fptosi.v4i64.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl) @@ -325,14 +325,14 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a1, a2 ; CHECK-NEXT: .LBB25_2: -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v16, v0.t ; CHECK-NEXT: bltu a0, a1, .LBB25_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: .LBB25_4: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8, v0.t ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp-mask.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp-mask.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp-mask.ll @@ -9,9 +9,8 @@ define <4 x i1> @vfptoui_v4i1_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i1_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 0, v0.t ; CHECK-NEXT: ret %v = call <4 x i1> @llvm.vp.fptoui.v4i1.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl) @@ -34,9 +33,8 @@ define <4 x i1> @vfptoui_v4i1_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i1_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 0, v0.t ; CHECK-NEXT: ret %v = call <4 x i1> @llvm.vp.fptoui.v4i1.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl) @@ -59,9 +57,8 @@ define <4 x i1> @vfptoui_v4i1_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i1_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v10, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v10, 0, v0.t ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll @@ -9,7 +9,7 @@ define <4 x i7> @vfptoui_v4i7_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i7_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -22,7 +22,7 @@ define <4 x i8> @vfptoui_v4i8_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i8_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -46,7 +46,7 @@ define <4 x i16> @vfptoui_v4i16_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i16_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8, v0.t ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.vp.fptoui.v4i16.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl) @@ -68,7 +68,7 @@ define <4 x i32> @vfptoui_v4i32_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i32_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v9, v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -92,9 +92,9 @@ define <4 x i64> @vfptoui_v4i64_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i64_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v10, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli 
zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v10, v0.t ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.vp.fptoui.v4i64.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl) @@ -118,9 +118,9 @@ define <4 x i8> @vfptoui_v4i8_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i8_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v9, 0, v0.t ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.fptoui.v4i8.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl) @@ -144,7 +144,7 @@ define <4 x i16> @vfptoui_v4i16_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i16_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -168,7 +168,7 @@ define <4 x i32> @vfptoui_v4i32_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i32_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8, v0.t ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.vp.fptoui.v4i32.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl) @@ -190,7 +190,7 @@ define <4 x i64> @vfptoui_v4i64_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i64_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v10, v8, v0.t ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -214,11 +214,11 @@ define <4 x i8> @vfptoui_v4i8_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i8_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.fptoui.v4i8.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl) @@ -244,9 +244,9 @@ define <4 x i16> @vfptoui_v4i16_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i16_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.vp.fptoui.v4i16.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl) @@ -270,7 +270,7 @@ define <4 x i32> @vfptoui_v4i32_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i32_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8, v0.t ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -294,7 +294,7 @@ define <4 x i64> @vfptoui_v4i64_v4f64(<4 x double> %va, <4 x i1> %m, i32 
zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i64_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8, v0.t ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.vp.fptoui.v4i64.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl) @@ -325,14 +325,14 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a1, a2 ; CHECK-NEXT: .LBB25_2: -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: vfcvt.rtz.xu.f.v v16, v16, v0.t ; CHECK-NEXT: bltu a0, a1, .LBB25_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: .LBB25_4: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8, v0.t ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll @@ -11,13 +11,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI0_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI0_0)(a0) -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call <1 x half> @llvm.round.v1f16(<1 x half> %x) @@ -30,13 +31,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI1_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI1_0)(a0) -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call <2 x half> @llvm.round.v2f16(<2 x half> %x) @@ -49,13 +51,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI2_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI2_0)(a0) -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call <4 x half> @llvm.round.v4f16(<4 x half> %x) @@ -68,13 +71,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI3_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI3_0)(a0) -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call <8 x half> @llvm.round.v8f16(<8 x half> %x) @@ -87,13 +91,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI4_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI4_0)(a0) -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu 
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t ; CHECK-NEXT: ret %a = call <16 x half> @llvm.round.v16f16(<16 x half> %x) @@ -107,13 +112,14 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI5_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI5_0)(a0) ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret %a = call <32 x half> @llvm.round.v32f16(<32 x half> %x) @@ -126,13 +132,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI6_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI6_0)(a0) -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call <1 x float> @llvm.round.v1f32(<1 x float> %x) @@ -145,13 +152,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI7_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI7_0)(a0) -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call <2 x float> @llvm.round.v2f32(<2 x float> %x) @@ -164,13 +172,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI8_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI8_0)(a0) -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call <4 x float> @llvm.round.v4f32(<4 x float> %x) @@ -183,13 +192,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI9_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI9_0)(a0) -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t ; CHECK-NEXT: ret %a = call <8 x float> @llvm.round.v8f32(<8 x float> %x) @@ -202,13 +212,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI10_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI10_0)(a0) -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vfabs.v v12, v8 ; 
CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret %a = call <16 x float> @llvm.round.v16f32(<16 x float> %x) @@ -221,13 +232,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI11_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI11_0)(a0) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call <1 x double> @llvm.round.v1f64(<1 x double> %x) @@ -240,13 +252,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI12_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI12_0)(a0) -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call <2 x double> @llvm.round.v2f64(<2 x double> %x) @@ -259,13 +272,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI13_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI13_0)(a0) -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t ; CHECK-NEXT: ret %a = call <4 x double> @llvm.round.v4f64(<4 x double> %x) @@ -278,13 +292,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI14_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI14_0)(a0) -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret %a = call <8 x double> @llvm.round.v8f64(<8 x double> %x) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll @@ -11,13 +11,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI0_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI0_0)(a0) -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call <1 x half> @llvm.roundeven.v1f16(<1 x half> %x) @@ -30,13 +31,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI1_0) ; 
CHECK-NEXT: flh ft0, %lo(.LCPI1_0)(a0) -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call <2 x half> @llvm.roundeven.v2f16(<2 x half> %x) @@ -49,13 +51,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI2_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI2_0)(a0) -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call <4 x half> @llvm.roundeven.v4f16(<4 x half> %x) @@ -68,13 +71,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI3_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI3_0)(a0) -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call <8 x half> @llvm.roundeven.v8f16(<8 x half> %x) @@ -87,13 +91,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI4_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI4_0)(a0) -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t ; CHECK-NEXT: ret %a = call <16 x half> @llvm.roundeven.v16f16(<16 x half> %x) @@ -107,13 +112,14 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI5_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI5_0)(a0) ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret %a = call <32 x half> @llvm.roundeven.v32f16(<32 x half> %x) @@ -126,13 +132,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI6_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI6_0)(a0) -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call <1 x float> @llvm.roundeven.v1f32(<1 x float> %x) @@ -145,13 +152,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI7_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI7_0)(a0) -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, 
mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call <2 x float> @llvm.roundeven.v2f32(<2 x float> %x) @@ -164,13 +172,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI8_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI8_0)(a0) -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call <4 x float> @llvm.roundeven.v4f32(<4 x float> %x) @@ -183,13 +192,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI9_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI9_0)(a0) -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t ; CHECK-NEXT: ret %a = call <8 x float> @llvm.roundeven.v8f32(<8 x float> %x) @@ -202,13 +212,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI10_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI10_0)(a0) -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret %a = call <16 x float> @llvm.roundeven.v16f32(<16 x float> %x) @@ -221,13 +232,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI11_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI11_0)(a0) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call <1 x double> @llvm.roundeven.v1f64(<1 x double> %x) @@ -240,13 +252,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI12_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI12_0)(a0) -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %x) @@ -259,13 +272,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI13_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI13_0)(a0) -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vfabs.v 
v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t ; CHECK-NEXT: ret %a = call <4 x double> @llvm.roundeven.v4f64(<4 x double> %x) @@ -278,13 +292,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI14_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI14_0)(a0) -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret %a = call <8 x double> @llvm.roundeven.v8f64(<8 x double> %x) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll @@ -256,7 +256,7 @@ ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a1) -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, tu, ma +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vslideup.vi v10, v8, 6 ; LMULMAX2-NEXT: vse32.v v10, (a0) ; LMULMAX2-NEXT: ret @@ -265,7 +265,7 @@ ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vle32.v v8, (a1) -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, ma +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vslideup.vi v9, v8, 2 ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vse32.v v9, (a0) @@ -544,7 +544,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 4, e64, m8, tu, ma +; CHECK-NEXT: vsetivli zero, 4, e64, m8, ta, ma ; CHECK-NEXT: vslideup.vi v16, v8, 2 ; CHECK-NEXT: vs8r.v v16, (a1) ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll @@ -538,7 +538,7 @@ define <4 x i16> @slideup_v4i16(<4 x i16> %x) { ; CHECK-LABEL: slideup_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, ma +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vslideup.vi v9, v8, 1 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -549,9 +549,9 @@ define <8 x i32> @slideup_v8i32(<8 x i32> %x) { ; CHECK-LABEL: slideup_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, tu, ma +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vslideup.vi v10, v8, 3 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %s = shufflevector <8 x i32> %x, <8 x i32> poison, <8 x i32> ret <8 x i32> %s diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-fp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-fp.ll @@ -5,7 +5,7 @@ define void @masked_load_v1f16(<1 x half>* %a, <1 x half>* %m_ptr, <1 x half>* %res_ptr) nounwind { ; CHECK-LABEL: masked_load_v1f16: 
; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a1) ; CHECK-NEXT: fmv.h.x ft0, zero ; CHECK-NEXT: vmfeq.vf v0, v8, ft0 @@ -23,7 +23,7 @@ define void @masked_load_v1f32(<1 x float>* %a, <1 x float>* %m_ptr, <1 x float>* %res_ptr) nounwind { ; CHECK-LABEL: masked_load_v1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: fmv.w.x ft0, zero ; CHECK-NEXT: vmfeq.vf v0, v8, ft0 @@ -41,7 +41,7 @@ define void @masked_load_v1f64(<1 x double>* %a, <1 x double>* %m_ptr, <1 x double>* %res_ptr) nounwind { ; RV32-LABEL: masked_load_v1f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a1) ; RV32-NEXT: fcvt.d.w ft0, zero ; RV32-NEXT: vmfeq.vf v0, v8, ft0 @@ -51,7 +51,7 @@ ; ; RV64-LABEL: masked_load_v1f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vle64.v v8, (a1) ; RV64-NEXT: fmv.d.x ft0, zero ; RV64-NEXT: vmfeq.vf v0, v8, ft0 @@ -69,7 +69,7 @@ define void @masked_load_v2f16(<2 x half>* %a, <2 x half>* %m_ptr, <2 x half>* %res_ptr) nounwind { ; CHECK-LABEL: masked_load_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a1) ; CHECK-NEXT: fmv.h.x ft0, zero ; CHECK-NEXT: vmfeq.vf v0, v8, ft0 @@ -87,7 +87,7 @@ define void @masked_load_v2f32(<2 x float>* %a, <2 x float>* %m_ptr, <2 x float>* %res_ptr) nounwind { ; CHECK-LABEL: masked_load_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: fmv.w.x ft0, zero ; CHECK-NEXT: vmfeq.vf v0, v8, ft0 @@ -105,7 +105,7 @@ define void @masked_load_v2f64(<2 x double>* %a, <2 x double>* %m_ptr, <2 x double>* %res_ptr) nounwind { ; RV32-LABEL: masked_load_v2f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a1) ; RV32-NEXT: fcvt.d.w ft0, zero ; RV32-NEXT: vmfeq.vf v0, v8, ft0 @@ -115,7 +115,7 @@ ; ; RV64-LABEL: masked_load_v2f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vle64.v v8, (a1) ; RV64-NEXT: fmv.d.x ft0, zero ; RV64-NEXT: vmfeq.vf v0, v8, ft0 @@ -133,7 +133,7 @@ define void @masked_load_v4f16(<4 x half>* %a, <4 x half>* %m_ptr, <4 x half>* %res_ptr) nounwind { ; CHECK-LABEL: masked_load_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a1) ; CHECK-NEXT: fmv.h.x ft0, zero ; CHECK-NEXT: vmfeq.vf v0, v8, ft0 @@ -151,7 +151,7 @@ define void @masked_load_v4f32(<4 x float>* %a, <4 x float>* %m_ptr, <4 x float>* %res_ptr) nounwind { ; CHECK-LABEL: masked_load_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: fmv.w.x ft0, zero ; CHECK-NEXT: vmfeq.vf v0, v8, ft0 @@ -169,7 +169,7 @@ define void @masked_load_v4f64(<4 x double>* %a, <4 x double>* %m_ptr, <4 x double>* %res_ptr) nounwind { ; RV32-LABEL: masked_load_v4f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; 
RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vle64.v v8, (a1) ; RV32-NEXT: fcvt.d.w ft0, zero ; RV32-NEXT: vmfeq.vf v0, v8, ft0 @@ -179,7 +179,7 @@ ; ; RV64-LABEL: masked_load_v4f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-NEXT: vle64.v v8, (a1) ; RV64-NEXT: fmv.d.x ft0, zero ; RV64-NEXT: vmfeq.vf v0, v8, ft0 @@ -197,7 +197,7 @@ define void @masked_load_v8f16(<8 x half>* %a, <8 x half>* %m_ptr, <8 x half>* %res_ptr) nounwind { ; CHECK-LABEL: masked_load_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a1) ; CHECK-NEXT: fmv.h.x ft0, zero ; CHECK-NEXT: vmfeq.vf v0, v8, ft0 @@ -215,7 +215,7 @@ define void @masked_load_v8f32(<8 x float>* %a, <8 x float>* %m_ptr, <8 x float>* %res_ptr) nounwind { ; CHECK-LABEL: masked_load_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: fmv.w.x ft0, zero ; CHECK-NEXT: vmfeq.vf v0, v8, ft0 @@ -233,7 +233,7 @@ define void @masked_load_v8f64(<8 x double>* %a, <8 x double>* %m_ptr, <8 x double>* %res_ptr) nounwind { ; RV32-LABEL: masked_load_v8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vle64.v v8, (a1) ; RV32-NEXT: fcvt.d.w ft0, zero ; RV32-NEXT: vmfeq.vf v0, v8, ft0 @@ -243,7 +243,7 @@ ; ; RV64-LABEL: masked_load_v8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vle64.v v8, (a1) ; RV64-NEXT: fmv.d.x ft0, zero ; RV64-NEXT: vmfeq.vf v0, v8, ft0 @@ -261,7 +261,7 @@ define void @masked_load_v16f16(<16 x half>* %a, <16 x half>* %m_ptr, <16 x half>* %res_ptr) nounwind { ; CHECK-LABEL: masked_load_v16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v8, (a1) ; CHECK-NEXT: fmv.h.x ft0, zero ; CHECK-NEXT: vmfeq.vf v0, v8, ft0 @@ -279,7 +279,7 @@ define void @masked_load_v16f32(<16 x float>* %a, <16 x float>* %m_ptr, <16 x float>* %res_ptr) nounwind { ; CHECK-LABEL: masked_load_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: fmv.w.x ft0, zero ; CHECK-NEXT: vmfeq.vf v0, v8, ft0 @@ -297,7 +297,7 @@ define void @masked_load_v16f64(<16 x double>* %a, <16 x double>* %m_ptr, <16 x double>* %res_ptr) nounwind { ; RV32-LABEL: masked_load_v16f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vle64.v v8, (a1) ; RV32-NEXT: fcvt.d.w ft0, zero ; RV32-NEXT: vmfeq.vf v0, v8, ft0 @@ -307,7 +307,7 @@ ; ; RV64-LABEL: masked_load_v16f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vle64.v v8, (a1) ; RV64-NEXT: fmv.d.x ft0, zero ; RV64-NEXT: vmfeq.vf v0, v8, ft0 @@ -326,7 +326,7 @@ ; CHECK-LABEL: masked_load_v32f16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 32 -; CHECK-NEXT: vsetvli zero, a3, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v8, (a1) ; CHECK-NEXT: fmv.h.x ft0, zero ; CHECK-NEXT: vmfeq.vf v0, v8, ft0 @@ -345,7 +345,7 @@ ; CHECK-LABEL: masked_load_v32f32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 
32
-; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT: vle32.v v8, (a1)
; CHECK-NEXT: fmv.w.x ft0, zero
; CHECK-NEXT: vmfeq.vf v0, v8, ft0
@@ -364,7 +364,7 @@
; RV32-LABEL: masked_load_v32f64:
; RV32: # %bb.0:
; RV32-NEXT: addi a3, a1, 128
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vle64.v v16, (a1)
; RV32-NEXT: vle64.v v24, (a3)
; RV32-NEXT: fcvt.d.w ft0, zero
@@ -382,7 +382,7 @@
; RV64-LABEL: masked_load_v32f64:
; RV64: # %bb.0:
; RV64-NEXT: addi a3, a1, 128
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vle64.v v16, (a1)
; RV64-NEXT: vle64.v v24, (a3)
; RV64-NEXT: fmv.d.x ft0, zero
@@ -408,7 +408,7 @@
; CHECK-LABEL: masked_load_v64f16:
; CHECK: # %bb.0:
; CHECK-NEXT: li a3, 64
-; CHECK-NEXT: vsetvli zero, a3, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a3, e16, m8, ta, ma
; CHECK-NEXT: vle16.v v8, (a1)
; CHECK-NEXT: fmv.h.x ft0, zero
; CHECK-NEXT: vmfeq.vf v0, v8, ft0
@@ -428,7 +428,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: addi a3, a1, 128
; CHECK-NEXT: li a4, 32
-; CHECK-NEXT: vsetvli zero, a4, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a4, e32, m8, ta, ma
; CHECK-NEXT: vle32.v v16, (a1)
; CHECK-NEXT: vle32.v v24, (a3)
; CHECK-NEXT: fmv.w.x ft0, zero
@@ -455,7 +455,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: addi a3, a1, 128
; CHECK-NEXT: li a4, 64
-; CHECK-NEXT: vsetvli zero, a4, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a4, e16, m8, ta, ma
; CHECK-NEXT: vle16.v v16, (a1)
; CHECK-NEXT: vle16.v v24, (a3)
; CHECK-NEXT: fmv.h.x ft0, zero
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll
@@ -5,7 +5,7 @@
define void @masked_load_v1i8(<1 x i8>* %a, <1 x i8>* %m_ptr, <1 x i8>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT: vle8.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle8.v v8, (a0), v0.t
@@ -22,7 +22,7 @@
define void @masked_load_v1i16(<1 x i16>* %a, <1 x i16>* %m_ptr, <1 x i16>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT: vle16.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle16.v v8, (a0), v0.t
@@ -39,7 +39,7 @@
define void @masked_load_v1i32(<1 x i32>* %a, <1 x i32>* %m_ptr, <1 x i32>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT: vle32.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle32.v v8, (a0), v0.t
@@ -56,7 +56,7 @@
define void @masked_load_v1i64(<1 x i64>* %a, <1 x i64>* %m_ptr, <1 x i64>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT: vle64.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle64.v v8, (a0), v0.t
@@ -73,7 +73,7 @@
define void @masked_load_v2i8(<2 x i8>* %a, <2 x i8>* %m_ptr, <2 x i8>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vle8.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle8.v v8, (a0), v0.t
@@ -90,7 +90,7 @@
define void @masked_load_v2i16(<2 x i16>* %a, <2 x i16>* %m_ptr, <2 x i16>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT: vle16.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle16.v v8, (a0), v0.t
@@ -107,7 +107,7 @@
define void @masked_load_v2i32(<2 x i32>* %a, <2 x i32>* %m_ptr, <2 x i32>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vle32.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle32.v v8, (a0), v0.t
@@ -124,7 +124,7 @@
define void @masked_load_v2i64(<2 x i64>* %a, <2 x i64>* %m_ptr, <2 x i64>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT: vle64.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle64.v v8, (a0), v0.t
@@ -141,7 +141,7 @@
define void @masked_load_v4i8(<4 x i8>* %a, <4 x i8>* %m_ptr, <4 x i8>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT: vle8.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle8.v v8, (a0), v0.t
@@ -158,7 +158,7 @@
define void @masked_load_v4i16(<4 x i16>* %a, <4 x i16>* %m_ptr, <4 x i16>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vle16.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle16.v v8, (a0), v0.t
@@ -175,7 +175,7 @@
define void @masked_load_v4i32(<4 x i32>* %a, <4 x i32>* %m_ptr, <4 x i32>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle32.v v8, (a0), v0.t
@@ -192,7 +192,7 @@
define void @masked_load_v4i64(<4 x i64>* %a, <4 x i64>* %m_ptr, <4 x i64>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT: vle64.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle64.v v8, (a0), v0.t
@@ -209,7 +209,7 @@
define void @masked_load_v8i8(<8 x i8>* %a, <8 x i8>* %m_ptr, <8 x i8>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vle8.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle8.v v8, (a0), v0.t
@@ -226,7 +226,7 @@
define void @masked_load_v8i16(<8 x i16>* %a, <8 x i16>* %m_ptr, <8 x i16>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vle16.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle16.v v8, (a0), v0.t
@@ -243,7 +243,7 @@
define void @masked_load_v8i32(<8 x i32>* %a, <8 x i32>* %m_ptr, <8 x i32>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vle32.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle32.v v8, (a0), v0.t
@@ -260,7 +260,7 @@
define void @masked_load_v8i64(<8 x i64>* %a, <8 x i64>* %m_ptr, <8 x i64>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; CHECK-NEXT: vle64.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle64.v v8, (a0), v0.t
@@ -277,7 +277,7 @@
define void @masked_load_v16i8(<16 x i8>* %a, <16 x i8>* %m_ptr, <16 x i8>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vle8.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle8.v v8, (a0), v0.t
@@ -294,7 +294,7 @@
define void @masked_load_v16i16(<16 x i16>* %a, <16 x i16>* %m_ptr, <16 x i16>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT: vle16.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle16.v v8, (a0), v0.t
@@ -311,7 +311,7 @@
define void @masked_load_v16i32(<16 x i32>* %a, <16 x i32>* %m_ptr, <16 x i32>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT: vle32.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle32.v v8, (a0), v0.t
@@ -328,7 +328,7 @@
define void @masked_load_v16i64(<16 x i64>* %a, <16 x i64>* %m_ptr, <16 x i64>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v16i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle64.v v8, (a0), v0.t
@@ -346,7 +346,7 @@
; CHECK-LABEL: masked_load_v32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: li a3, 32
-; CHECK-NEXT: vsetvli zero, a3, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a3, e8, m2, ta, ma
; CHECK-NEXT: vle8.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle8.v v8, (a0), v0.t
@@ -364,7 +364,7 @@
; CHECK-LABEL: masked_load_v32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: li a3, 32
-; CHECK-NEXT: vsetvli zero, a3, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a3, e16, m4, ta, ma
; CHECK-NEXT: vle16.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle16.v v8, (a0), v0.t
@@ -382,7 +382,7 @@
; CHECK-LABEL: masked_load_v32i32:
; CHECK: # %bb.0:
; CHECK-NEXT: li a3, 32
-; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT: vle32.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle32.v v8, (a0), v0.t
@@ -406,7 +406,7 @@
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; RV32-NEXT: vmv.v.i v24, 0
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vmseq.vv v8, v0, v24
; RV32-NEXT: vmseq.vv v0, v16, v24
; RV32-NEXT: addi a1, a0, 128
@@ -421,7 +421,7 @@
; RV64-LABEL: masked_load_v32i64:
; RV64: # %bb.0:
; RV64-NEXT: addi a3, a1, 128
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vle64.v v16, (a1)
; RV64-NEXT: vle64.v v24, (a3)
; RV64-NEXT: vmseq.vi v8, v16, 0
@@ -446,7 +446,7 @@
; CHECK-LABEL: masked_load_v64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: li a3, 64
-; CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, ma
; CHECK-NEXT: vle8.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle8.v v8, (a0), v0.t
@@ -464,7 +464,7 @@
; CHECK-LABEL: masked_load_v64i16:
; CHECK: # %bb.0:
; CHECK-NEXT: li a3, 64
-; CHECK-NEXT: vsetvli zero, a3, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a3, e16, m8, ta, ma
; CHECK-NEXT: vle16.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle16.v v8, (a0), v0.t
@@ -483,7 +483,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: addi a3, a1, 128
; CHECK-NEXT: li a4, 32
-; CHECK-NEXT: vsetvli zero, a4, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a4, e32, m8, ta, ma
; CHECK-NEXT: vle32.v v16, (a1)
; CHECK-NEXT: vle32.v v24, (a3)
; CHECK-NEXT: vmseq.vi v8, v16, 0
@@ -508,7 +508,7 @@
; CHECK-LABEL: masked_load_v128i8:
; CHECK: # %bb.0:
; CHECK-NEXT: li a3, 128
-; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT: vle8.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle8.v v8, (a0), v0.t
@@ -527,7 +527,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: addi a3, a1, 128
; CHECK-NEXT: li a4, 128
-; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, ma
; CHECK-NEXT: vle8.v v16, (a1)
; CHECK-NEXT: vle8.v v24, (a3)
; CHECK-NEXT: vmseq.vi v8, v16, 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
@@ -11,13 +11,16 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI0_0)
; CHECK-NEXT: flh ft0, %lo(.LCPI0_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%v = call <2 x half> @llvm.vp.round.v2f16(<2 x half> %va, <2 x i1> %m, i32 %evl)
@@ -31,13 +34,16 @@
; CHECK-NEXT: flh ft0, %lo(.LCPI1_0)(a1)
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vmset.m v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
@@ -53,13 +59,16 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI2_0)
; CHECK-NEXT: flh ft0, %lo(.LCPI2_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%v = call <4 x half> @llvm.vp.round.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
@@ -73,13 +82,16 @@
; CHECK-NEXT: flh ft0, %lo(.LCPI3_0)(a1)
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT: vmset.m v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
@@ -95,13 +107,16 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI4_0)
; CHECK-NEXT: flh ft0, %lo(.LCPI4_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%v = call <8 x half> @llvm.vp.round.v8f16(<8 x half> %va, <8 x i1> %m, i32 %evl)
@@ -115,13 +130,16 @@
; CHECK-NEXT: flh ft0, %lo(.LCPI5_0)(a1)
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmset.m v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
@@ -135,17 +153,20 @@
define <16 x half> @vp_round_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v16f16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI6_0)
; CHECK-NEXT: flh ft0, %lo(.LCPI6_0)(a1)
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT: ret
%v = call <16 x half> @llvm.vp.round.v16f16(<16 x half> %va, <16 x i1> %m, i32 %evl)
@@ -159,15 +180,18 @@
; CHECK-NEXT: flh ft0, %lo(.LCPI7_0)(a1)
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vmset.m v10
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
@@ -183,13 +207,16 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI8_0)
; CHECK-NEXT: flw ft0, %lo(.LCPI8_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%v = call <2 x float> @llvm.vp.round.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
@@ -203,13 +230,16 @@
; CHECK-NEXT: flw ft0, %lo(.LCPI9_0)(a1)
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vmset.m v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
@@ -225,13 +255,16 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI10_0)
; CHECK-NEXT: flw ft0, %lo(.LCPI10_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%v = call <4 x float> @llvm.vp.round.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
@@ -245,13 +278,16 @@
; CHECK-NEXT: flw ft0, %lo(.LCPI11_0)(a1)
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT: vmset.m v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
@@ -265,17 +301,20 @@
define <8 x float> @vp_round_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v8f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI12_0)
; CHECK-NEXT: flw ft0, %lo(.LCPI12_0)(a1)
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT: ret
%v = call <8 x float> @llvm.vp.round.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
@@ -289,15 +328,18 @@
; CHECK-NEXT: flw ft0, %lo(.LCPI13_0)(a1)
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmset.m v10
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
@@ -311,17 +353,20 @@
define <16 x float> @vp_round_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v16f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI14_0)
; CHECK-NEXT: flw ft0, %lo(.LCPI14_0)(a1)
-; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: ret
%v = call <16 x float> @llvm.vp.round.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
@@ -335,15 +380,18 @@
; CHECK-NEXT: flw ft0, %lo(.LCPI15_0)(a1)
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vmset.m v12
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
@@ -359,13 +407,16 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI16_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI16_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%v = call <2 x double> @llvm.vp.round.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
@@ -379,13 +430,16 @@
; CHECK-NEXT: fld ft0, %lo(.LCPI17_0)(a1)
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vmset.m v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
@@ -399,17 +453,20 @@
define <4 x double> @vp_round_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v4f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI18_0)(a1)
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT: ret
%v = call <4 x double> @llvm.vp.round.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
@@ -423,15 +480,18 @@
; CHECK-NEXT: fld ft0, %lo(.LCPI19_0)(a1)
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT: vmset.m v10
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
@@ -445,17 +505,20 @@
define <8 x double> @vp_round_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v8f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI20_0)(a1)
-; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: ret
%v = call <8 x double> @llvm.vp.round.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
@@ -469,15 +532,18 @@
; CHECK-NEXT: fld ft0, %lo(.LCPI21_0)(a1)
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmset.m v12
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
@@ -491,17 +557,20 @@
define <15 x double> @vp_round_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v15f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI22_0)(a1)
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: ret
%v = call <15 x double> @llvm.vp.round.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
@@ -515,15 +584,18 @@
; CHECK-NEXT: fld ft0, %lo(.LCPI23_0)(a1)
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vmset.m v16
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <15 x i1> poison, i1 true, i32 0
@@ -537,17 +609,20 @@
define <16 x double> @vp_round_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v16f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI24_0)(a1)
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: ret
%v = call <16 x double> @llvm.vp.round.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
@@ -561,15 +636,18 @@
; CHECK-NEXT: fld ft0, %lo(.LCPI25_0)(a1)
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vmset.m v16
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
@@ -583,57 +661,69 @@
define <32 x double> @vp_round_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: li a1, 0
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: addi a2, a0, -16
-; CHECK-NEXT: vslidedown.vi v2, v0, 2
+; CHECK-NEXT: vslidedown.vi v25, v0, 2
; CHECK-NEXT: bltu a0, a2, .LBB26_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB26_2:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: sub sp, sp, a2
; CHECK-NEXT: lui a2, %hi(.LCPI26_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI26_0)(a2)
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v2
-; CHECK-NEXT: vfabs.v v24, v16, v0.t
-; CHECK-NEXT: vmflt.vf v2, v24, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vfabs.v v8, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t
; CHECK-NEXT: fsrmi a1, 4
-; CHECK-NEXT: vmv1r.v v0, v2
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
; CHECK-NEXT: fsrm a1
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: li a1, 16
-; CHECK-NEXT: addi a2, sp, 16
-; CHECK-NEXT: vl8re8.v v24, (a2) # Unknown-size Folded Reload
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: bltu a0, a1, .LBB26_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB26_4:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfabs.v v16, v8, v0.t
-; CHECK-NEXT: vmflt.vf v1, v16, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vmflt.vf v24, v16, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
@@ -644,55 +734,71 @@
define <32 x double> @vp_round_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v32f64_unmasked:
; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: li a1, 0
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: addi a2, a0, -16
-; CHECK-NEXT: vmset.m v1
+; CHECK-NEXT: vmset.m v24
; CHECK-NEXT: bltu a0, a2, .LBB27_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB27_2:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: sub sp, sp, a2
; CHECK-NEXT: lui a2, %hi(.LCPI27_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI27_0)(a2)
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v1
-; CHECK-NEXT: vfabs.v v24, v16, v0.t
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmflt.vf v2, v24, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vfabs.v v8, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vmv1r.v v25, v24
+; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t
; CHECK-NEXT: fsrmi a1, 4
-; CHECK-NEXT: vmv1r.v v0, v2
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
; CHECK-NEXT: fsrm a1
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: li a1, 16
-; CHECK-NEXT: addi a2, sp, 16
-; CHECK-NEXT: vl8re8.v v24, (a2) # Unknown-size Folded Reload
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: add sp, sp, a2
-; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: bltu a0, a1, .LBB27_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB27_4:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v1
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vmflt.vf v1, v24, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vmflt.vf v24, v16, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vmv1r.v v0, v1
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <32 x i1> poison, i1 true, i32 0
%m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
@@ -11,13 +11,16 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI0_0)
; CHECK-NEXT: flh ft0, %lo(.LCPI0_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%v = call <2 x half> @llvm.vp.roundeven.v2f16(<2 x half> %va, <2 x i1> %m, i32 %evl)
@@ -31,13 +34,16 @@
; CHECK-NEXT: flh ft0, %lo(.LCPI1_0)(a1)
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vmset.m v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
@@ -53,13 +59,16 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI2_0)
; CHECK-NEXT: flh ft0, %lo(.LCPI2_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%v = call <4 x half> @llvm.vp.roundeven.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
@@ -73,13 +82,16 @@
; CHECK-NEXT: flh ft0, %lo(.LCPI3_0)(a1)
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT: vmset.m v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
@@ -95,13 +107,16 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI4_0)
; CHECK-NEXT: flh ft0, %lo(.LCPI4_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%v = call <8 x half> @llvm.vp.roundeven.v8f16(<8 x half> %va, <8 x i1> %m, i32 %evl)
@@ -115,13 +130,16 @@
; CHECK-NEXT: flh ft0, %lo(.LCPI5_0)(a1)
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmset.m v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
@@ -135,17 +153,20 @@
define <16 x half> @vp_roundeven_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v16f16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI6_0)
; CHECK-NEXT: flh ft0, %lo(.LCPI6_0)(a1)
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT: ret
%v = call <16 x half> @llvm.vp.roundeven.v16f16(<16 x half> %va, <16 x i1> %m, i32 %evl)
@@ -159,15 +180,18 @@
; CHECK-NEXT: flh ft0, %lo(.LCPI7_0)(a1)
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vmset.m v10
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
@@ -183,13 +207,16 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI8_0)
; CHECK-NEXT: flw ft0, %lo(.LCPI8_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%v = call <2 x float> @llvm.vp.roundeven.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
@@ -203,13 +230,16 @@
; CHECK-NEXT: flw ft0, %lo(.LCPI9_0)(a1)
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vmset.m v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
@@ -225,13 +255,16 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI10_0)
; CHECK-NEXT: flw ft0, %lo(.LCPI10_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%v = call <4 x float> @llvm.vp.roundeven.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
@@ -245,13 +278,16 @@
; CHECK-NEXT: flw ft0, %lo(.LCPI11_0)(a1)
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT: vmset.m v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
@@ -265,17 +301,20 @@
define <8 x float> @vp_roundeven_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v8f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI12_0)
; CHECK-NEXT: flw ft0, %lo(.LCPI12_0)(a1)
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT: ret
%v = call <8 x float> @llvm.vp.roundeven.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
@@ -289,15 +328,18 @@
; CHECK-NEXT: flw ft0, %lo(.LCPI13_0)(a1)
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmset.m v10
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
@@ -311,17 +353,20 @@
define <16 x float> @vp_roundeven_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v16f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI14_0)
; CHECK-NEXT: flw ft0, %lo(.LCPI14_0)(a1)
-; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: ret
%v = call <16 x float> @llvm.vp.roundeven.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
@@ -335,15 +380,18 @@
; CHECK-NEXT: flw ft0, %lo(.LCPI15_0)(a1)
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vmset.m v12
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
@@ -359,13 +407,16 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI16_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI16_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%v = call <2 x double> @llvm.vp.roundeven.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
@@ -379,13 +430,16 @@
; CHECK-NEXT: fld ft0, %lo(.LCPI17_0)(a1)
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vmset.m v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
@@ -399,17 +453,20 @@
define <4 x double> @vp_roundeven_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v4f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI18_0)(a1)
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT: ret
%v = call <4 x double> @llvm.vp.roundeven.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
@@ -423,15 +480,18 @@
; CHECK-NEXT: fld ft0, %lo(.LCPI19_0)(a1)
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT: vmset.m v10
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
@@ -445,17 +505,20 @@
define <8 x double> @vp_roundeven_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v8f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI20_0)(a1)
-; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: ret
%v = call <8 x double> @llvm.vp.roundeven.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
@@ -469,15 +532,18 @@
; CHECK-NEXT: fld ft0, %lo(.LCPI21_0)(a1)
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmset.m v12
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
@@ -491,17 +557,20 @@
define <15 x double> @vp_roundeven_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v15f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI22_0)(a1)
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: ret
%v = call <15 x double> @llvm.vp.roundeven.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
@@ -515,15 +584,18 @@
; CHECK-NEXT: fld ft0, %lo(.LCPI23_0)(a1)
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vmset.m v16
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <15 x i1> poison, i1 true, i32 0
@@ -537,17 +609,20 @@
define <16 x double> @vp_roundeven_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v16f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI24_0)(a1)
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: ret
%v = call <16 x double> @llvm.vp.roundeven.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
@@ -561,15 +636,18 @@
; CHECK-NEXT: fld ft0, %lo(.LCPI25_0)(a1)
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vmset.m v16
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
@@ -583,57 +661,69 @@
define <32 x double> @vp_roundeven_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: li a1, 0
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: addi a2, a0, -16
-; CHECK-NEXT: vslidedown.vi v2, v0, 2
+; CHECK-NEXT: vslidedown.vi v25, v0, 2
; CHECK-NEXT: bltu a0, a2, .LBB26_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB26_2:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: sub sp, sp, a2
; CHECK-NEXT: lui a2, %hi(.LCPI26_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI26_0)(a2)
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v2
-; CHECK-NEXT: vfabs.v v24, v16, v0.t
-; CHECK-NEXT: vmflt.vf v2, v24, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vfabs.v v8, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t
; CHECK-NEXT: fsrmi a1, 0
-; CHECK-NEXT: vmv1r.v v0, v2
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
; CHECK-NEXT: fsrm a1
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: li a1, 16
-; CHECK-NEXT: addi a2, sp, 16
-; CHECK-NEXT: vl8re8.v v24, (a2) # Unknown-size Folded Reload
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: bltu a0, a1, .LBB26_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB26_4:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfabs.v v16, v8, v0.t
-; CHECK-NEXT: vmflt.vf v1, v16, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vmflt.vf v24, v16, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
@@ -644,55 +734,71 @@
define <32 x double> @vp_roundeven_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v32f64_unmasked:
; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: li a1, 0
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: addi a2, a0, -16
-; CHECK-NEXT: vmset.m v1
+; CHECK-NEXT: vmset.m v24
; CHECK-NEXT: bltu a0, a2, .LBB27_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB27_2:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: sub sp, sp, a2
; CHECK-NEXT: lui a2, %hi(.LCPI27_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI27_0)(a2)
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v1
-; CHECK-NEXT: vfabs.v v24, v16, v0.t
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmflt.vf v2, v24, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vfabs.v v8, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vmv1r.v v25, v24
+; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t
; CHECK-NEXT: fsrmi a1, 0
-; CHECK-NEXT: vmv1r.v v0, v2
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
; CHECK-NEXT: fsrm a1
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: li a1, 16
-; CHECK-NEXT: addi a2, sp, 16
-; CHECK-NEXT: vl8re8.v v24, (a2) # Unknown-size Folded Reload
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: add sp, sp, a2
-; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: bltu a0, a1, .LBB27_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB27_4:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vmflt.vf v24, v16, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vmv1r.v v0, v1
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <32 x i1> poison, i1 true, i32 0
%m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll
@@ -9,7 +9,7 @@
define <4 x i16> @vsext_v4i16_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsext_v4i16_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vsext.vf2 v9, v8, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
@@ -33,7 +33,7 @@
define <4 x i32> @vsext_v4i32_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsext_v4i32_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vsext.vf4 v9, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
@@ -57,7 +57,7 @@
define <4 x i64> @vsext_v4i64_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsext_v4i64_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vsext.vf8 v10, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
@@ -81,7 +81,7 @@
define <4 x i32> @vsext_v4i32_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsext_v4i32_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vsext.vf2 v9, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
@@ -105,7 +105,7 @@
define <4 x i64> @vsext_v4i64_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsext_v4i64_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vsext.vf4 v10, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
@@ -129,7 +129,7 @@
define <4 x i64> @vsext_v4i64_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsext_v4i64_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vsext.vf2 v10, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
@@ -164,14 +164,14 @@
; CHECK-NEXT: .LBB12_2:
; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; CHECK-NEXT: vslidedown.vi v24, v8, 16
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: vsext.vf2 v16, v24, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB12_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB12_4:
; CHECK-NEXT:
vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: vsext.vf2 v24, v8, v0.t ; CHECK-NEXT: vmv.v.v v8, v24 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp-mask.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp-mask.ll @@ -9,7 +9,7 @@ define <4 x half> @vsitofp_v4f16_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f16_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, -1, v0 ; CHECK-NEXT: vmv1r.v v0, v8 @@ -36,7 +36,7 @@ define <4 x float> @vsitofp_v4f32_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f32_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, -1, v0 ; CHECK-NEXT: vmv1r.v v0, v8 @@ -63,7 +63,7 @@ define <4 x double> @vsitofp_v4f64_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f64_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: vmerge.vim v10, v10, -1, v0 ; CHECK-NEXT: vmv1r.v v0, v8 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll @@ -12,7 +12,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v8 ; CHECK-NEXT: vsra.vi v9, v8, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t ; CHECK-NEXT: ret %v = call <4 x half> @llvm.vp.sitofp.v4f16.v4i7(<4 x i7> %va, <4 x i1> %m, i32 %evl) @@ -24,7 +24,7 @@ define <4 x half> @vsitofp_v4f16_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f16_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v9, v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -48,7 +48,7 @@ define <4 x half> @vsitofp_v4f16_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f16_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t ; CHECK-NEXT: ret %v = call <4 x half> @llvm.vp.sitofp.v4f16.v4i16(<4 x i16> %va, <4 x i1> %m, i32 %evl) @@ -70,7 +70,7 @@ define <4 x half> @vsitofp_v4f16_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f16_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.x.w v9, v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -94,9 +94,9 @@ define <4 x half> @vsitofp_v4f16_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f16_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.f.x.w v10, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, 
mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v8, v10, v0.t ; CHECK-NEXT: ret %v = call <4 x half> @llvm.vp.sitofp.v4f16.v4i64(<4 x i64> %va, <4 x i1> %m, i32 %evl) @@ -120,7 +120,7 @@ define <4 x float> @vsitofp_v4f32_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f32_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsext.vf2 v9, v8, v0.t ; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t ; CHECK-NEXT: ret @@ -144,7 +144,7 @@ define <4 x float> @vsitofp_v4f32_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f32_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v9, v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -168,7 +168,7 @@ define <4 x float> @vsitofp_v4f32_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f32_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t ; CHECK-NEXT: ret %v = call <4 x float> @llvm.vp.sitofp.v4f32.v4i32(<4 x i32> %va, <4 x i1> %m, i32 %evl) @@ -190,7 +190,7 @@ define <4 x float> @vsitofp_v4f32_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f32_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.f.x.w v10, v8, v0.t ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -214,7 +214,7 @@ define <4 x double> @vsitofp_v4f64_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f64_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsext.vf4 v10, v8, v0.t ; CHECK-NEXT: vfwcvt.f.x.v v8, v10, v0.t ; CHECK-NEXT: ret @@ -238,7 +238,7 @@ define <4 x double> @vsitofp_v4f64_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f64_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsext.vf2 v10, v8, v0.t ; CHECK-NEXT: vfwcvt.f.x.v v8, v10, v0.t ; CHECK-NEXT: ret @@ -262,7 +262,7 @@ define <4 x double> @vsitofp_v4f64_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f64_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v10, v8, v0.t ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -286,7 +286,7 @@ define <4 x double> @vsitofp_v4f64_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f64_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t ; CHECK-NEXT: ret %v = call <4 x double> @llvm.vp.sitofp.v4f64.v4i64(<4 x i64> %va, <4 x i1> %m, i32 %evl) @@ -317,14 +317,14 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a1, a2 ; CHECK-NEXT: .LBB25_2: -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t ; CHECK-NEXT: bltu a0, a1, .LBB25_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: .LBB25_4: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; 
CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll @@ -11,13 +11,13 @@ define <2 x i8> @strided_vpload_v2i8_i8(i8* %ptr, i8 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_v2i8_i8: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_v2i8_i8: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret %load = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0i8.i8(i8* %ptr, i8 %stride, <2 x i1> %m, i32 %evl) @@ -29,13 +29,13 @@ define <2 x i8> @strided_vpload_v2i8_i16(i8* %ptr, i16 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_v2i8_i16: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_v2i8_i16: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret %load = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0i8.i16(i8* %ptr, i16 %stride, <2 x i1> %m, i32 %evl) @@ -47,13 +47,13 @@ define <2 x i8> @strided_vpload_v2i8_i64(i8* %ptr, i64 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_v2i8_i64: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a3, e8, mf8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a3, e8, mf8, ta, ma ; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_v2i8_i64: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret %load = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0i8.i64(i8* %ptr, i64 %stride, <2 x i1> %m, i32 %evl) @@ -65,13 +65,13 @@ define <2 x i8> @strided_vpload_v2i8(i8* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_v2i8: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_v2i8: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret %load = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0i8.i32(i8* %ptr, i32 %stride, <2 x i1> %m, i32 %evl) @@ -83,13 +83,13 @@ define <4 x i8> @strided_vpload_v4i8(i8* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_v4i8: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf4, ta, 
mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_v4i8: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret %load = call <4 x i8> @llvm.experimental.vp.strided.load.v4i8.p0i8.i32(i8* %ptr, i32 %stride, <4 x i1> %m, i32 %evl) @@ -119,13 +119,13 @@ define <8 x i8> @strided_vpload_v8i8(i8* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_v8i8: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_v8i8: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret %load = call <8 x i8> @llvm.experimental.vp.strided.load.v8i8.p0i8.i32(i8* %ptr, i32 %stride, <8 x i1> %m, i32 %evl) @@ -137,13 +137,13 @@ define <2 x i16> @strided_vpload_v2i16(i16* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_v2i16: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_v2i16: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret %load = call <2 x i16> @llvm.experimental.vp.strided.load.v2i16.p0i16.i32(i16* %ptr, i32 %stride, <2 x i1> %m, i32 %evl) @@ -155,13 +155,13 @@ define <4 x i16> @strided_vpload_v4i16(i16* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_v4i16: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_v4i16: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret %load = call <4 x i16> @llvm.experimental.vp.strided.load.v4i16.p0i16.i32(i16* %ptr, i32 %stride, <4 x i1> %m, i32 %evl) @@ -173,13 +173,13 @@ define <8 x i16> @strided_vpload_v8i16(i16* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_v8i16: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_v8i16: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret %load = call <8 x i16> @llvm.experimental.vp.strided.load.v8i16.p0i16.i32(i16* %ptr, i32 %stride, <8 x i1> %m, i32 %evl) @@ -209,13 +209,13 @@ define <2 x i32> @strided_vpload_v2i32(i32* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; 
CHECK-RV32-LABEL: strided_vpload_v2i32: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_v2i32: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret %load = call <2 x i32> @llvm.experimental.vp.strided.load.v2i32.p0i32.i32(i32* %ptr, i32 %stride, <2 x i1> %m, i32 %evl) @@ -227,13 +227,13 @@ define <4 x i32> @strided_vpload_v4i32(i32* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_v4i32: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_v4i32: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret %load = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0i32.i32(i32* %ptr, i32 %stride, <4 x i1> %m, i32 %evl) @@ -245,13 +245,13 @@ define <8 x i32> @strided_vpload_v8i32(i32* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_v8i32: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_v8i32: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret %load = call <8 x i32> @llvm.experimental.vp.strided.load.v8i32.p0i32.i32(i32* %ptr, i32 %stride, <8 x i1> %m, i32 %evl) @@ -281,13 +281,13 @@ define <2 x i64> @strided_vpload_v2i64(i64* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_v2i64: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_v2i64: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret %load = call <2 x i64> @llvm.experimental.vp.strided.load.v2i64.p0i64.i32(i64* %ptr, i32 %stride, <2 x i1> %m, i32 %evl) @@ -299,13 +299,13 @@ define <4 x i64> @strided_vpload_v4i64(i64* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_v4i64: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_v4i64: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret %load = call <4 x i64> @llvm.experimental.vp.strided.load.v4i64.p0i64.i32(i64* %ptr, i32 %stride, <4 x i1> %m, i32 %evl) @@ 
-335,13 +335,13 @@ define <8 x i64> @strided_vpload_v8i64(i64* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_v8i64: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_v8i64: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret %load = call <8 x i64> @llvm.experimental.vp.strided.load.v8i64.p0i64.i32(i64* %ptr, i32 %stride, <8 x i1> %m, i32 %evl) @@ -353,13 +353,13 @@ define <2 x half> @strided_vpload_v2f16(half* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_v2f16: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_v2f16: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret %load = call <2 x half> @llvm.experimental.vp.strided.load.v2f16.p0f16.i32(half* %ptr, i32 %stride, <2 x i1> %m, i32 %evl) @@ -389,13 +389,13 @@ define <4 x half> @strided_vpload_v4f16(half* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_v4f16: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_v4f16: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret %load = call <4 x half> @llvm.experimental.vp.strided.load.v4f16.p0f16.i32(half* %ptr, i32 %stride, <4 x i1> %m, i32 %evl) @@ -407,13 +407,13 @@ define <8 x half> @strided_vpload_v8f16(half* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_v8f16: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_v8f16: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret %load = call <8 x half> @llvm.experimental.vp.strided.load.v8f16.p0f16.i32(half* %ptr, i32 %stride, <8 x i1> %m, i32 %evl) @@ -425,13 +425,13 @@ define <2 x float> @strided_vpload_v2f32(float* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_v2f32: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_v2f32: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, 
v0.t ; CHECK-RV64-NEXT: ret %load = call <2 x float> @llvm.experimental.vp.strided.load.v2f32.p0f32.i32(float* %ptr, i32 %stride, <2 x i1> %m, i32 %evl) @@ -443,13 +443,13 @@ define <4 x float> @strided_vpload_v4f32(float* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_v4f32: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_v4f32: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret %load = call <4 x float> @llvm.experimental.vp.strided.load.v4f32.p0f32.i32(float* %ptr, i32 %stride, <4 x i1> %m, i32 %evl) @@ -461,13 +461,13 @@ define <8 x float> @strided_vpload_v8f32(float* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_v8f32: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_v8f32: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret %load = call <8 x float> @llvm.experimental.vp.strided.load.v8f32.p0f32.i32(float* %ptr, i32 %stride, <8 x i1> %m, i32 %evl) @@ -497,13 +497,13 @@ define <2 x double> @strided_vpload_v2f64(double* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_v2f64: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_v2f64: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret %load = call <2 x double> @llvm.experimental.vp.strided.load.v2f64.p0f64.i32(double* %ptr, i32 %stride, <2 x i1> %m, i32 %evl) @@ -515,13 +515,13 @@ define <4 x double> @strided_vpload_v4f64(double* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_v4f64: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_v4f64: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret %load = call <4 x double> @llvm.experimental.vp.strided.load.v4f64.p0f64.i32(double* %ptr, i32 %stride, <4 x i1> %m, i32 %evl) @@ -551,13 +551,13 @@ define <8 x double> @strided_vpload_v8f64(double* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_v8f64: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_v8f64: ; 
CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret %load = call <8 x double> @llvm.experimental.vp.strided.load.v8f64.p0f64.i32(double* %ptr, i32 %stride, <8 x i1> %m, i32 %evl) @@ -568,13 +568,13 @@ define <3 x double> @strided_vpload_v3f64(double* %ptr, i32 signext %stride, <3 x i1> %mask, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_v3f64: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_v3f64: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret %v = call <3 x double> @llvm.experimental.vp.strided.load.v3f64.p0f64.i32(double* %ptr, i32 %stride, <3 x i1> %mask, i32 %evl) @@ -621,9 +621,9 @@ ; CHECK-RV32-NEXT: add a4, a0, a4 ; CHECK-RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-RV32-NEXT: vslidedown.vi v0, v8, 2 -; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; CHECK-RV32-NEXT: vlse64.v v16, (a4), a1, v0.t -; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-RV32-NEXT: vmv1r.v v0, v8 ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret @@ -646,9 +646,9 @@ ; CHECK-RV64-NEXT: add a4, a0, a4 ; CHECK-RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-RV64-NEXT: vslidedown.vi v0, v8, 2 -; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; CHECK-RV64-NEXT: vlse64.v v16, (a4), a1, v0.t -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-RV64-NEXT: vmv1r.v v0, v8 ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret @@ -733,7 +733,7 @@ ; CHECK-RV32-NEXT: add t0, a1, t0 ; CHECK-RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-RV32-NEXT: vslidedown.vi v0, v8, 2 -; CHECK-RV32-NEXT: vsetvli zero, a7, e64, m8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a7, e64, m8, ta, ma ; CHECK-RV32-NEXT: vlse64.v v16, (t0), a2, v0.t ; CHECK-RV32-NEXT: addi t0, a4, -32 ; CHECK-RV32-NEXT: li a7, 0 @@ -749,9 +749,9 @@ ; CHECK-RV32-NEXT: add a3, a1, a3 ; CHECK-RV32-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; CHECK-RV32-NEXT: vslidedown.vi v0, v8, 4 -; CHECK-RV32-NEXT: vsetvli zero, a7, e64, m8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a7, e64, m8, ta, ma ; CHECK-RV32-NEXT: vlse64.v v24, (a3), a2, v0.t -; CHECK-RV32-NEXT: vsetvli zero, a5, e64, m8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a5, e64, m8, ta, ma ; CHECK-RV32-NEXT: vmv1r.v v0, v8 ; CHECK-RV32-NEXT: vlse64.v v8, (a1), a2, v0.t ; CHECK-RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma @@ -789,7 +789,7 @@ ; CHECK-RV64-NEXT: add t0, a1, t0 ; CHECK-RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-RV64-NEXT: vslidedown.vi v0, v8, 2 -; CHECK-RV64-NEXT: vsetvli zero, a7, e64, m8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a7, e64, m8, ta, ma ; CHECK-RV64-NEXT: vlse64.v v16, (t0), a2, v0.t ; CHECK-RV64-NEXT: addi t0, a3, -32 ; CHECK-RV64-NEXT: li a7, 0 @@ -805,9 +805,9 @@ ; CHECK-RV64-NEXT: add a3, a1, a3 ; CHECK-RV64-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; CHECK-RV64-NEXT: vslidedown.vi v0, v8, 4 -; 
CHECK-RV64-NEXT: vsetvli zero, a7, e64, m8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a7, e64, m8, ta, ma ; CHECK-RV64-NEXT: vlse64.v v24, (a3), a2, v0.t -; CHECK-RV64-NEXT: vsetvli zero, a5, e64, m8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a5, e64, m8, ta, ma ; CHECK-RV64-NEXT: vmv1r.v v0, v8 ; CHECK-RV64-NEXT: vlse64.v v8, (a1), a2, v0.t ; CHECK-RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp-mask.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp-mask.ll @@ -9,7 +9,7 @@ define <4 x half> @vuitofp_v4f16_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f16_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 ; CHECK-NEXT: vmv1r.v v0, v8 @@ -36,7 +36,7 @@ define <4 x float> @vuitofp_v4f32_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f32_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 ; CHECK-NEXT: vmv1r.v v0, v8 @@ -63,7 +63,7 @@ define <4 x double> @vuitofp_v4f64_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f64_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: vmerge.vim v10, v10, 1, v0 ; CHECK-NEXT: vmv1r.v v0, v8 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll @@ -12,7 +12,7 @@ ; CHECK-NEXT: li a1, 127 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vand.vx v9, v8, a1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t ; CHECK-NEXT: ret %v = call <4 x half> @llvm.vp.uitofp.v4f16.v4i7(<4 x i7> %va, <4 x i1> %m, i32 %evl) @@ -24,7 +24,7 @@ define <4 x half> @vuitofp_v4f16_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f16_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -48,7 +48,7 @@ define <4 x half> @vuitofp_v4f16_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f16_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8, v0.t ; CHECK-NEXT: ret %v = call <4 x half> @llvm.vp.uitofp.v4f16.v4i16(<4 x i16> %va, <4 x i1> %m, i32 %evl) @@ -70,7 +70,7 @@ define <4 x half> @vuitofp_v4f16_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f16_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.xu.w v9, v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -94,9 +94,9 @@ define <4 x half> @vuitofp_v4f16_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vuitofp_v4f16_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.f.xu.w v10, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v8, v10, v0.t ; CHECK-NEXT: ret %v = call <4 x half> @llvm.vp.uitofp.v4f16.v4i64(<4 x i64> %va, <4 x i1> %m, i32 %evl) @@ -120,7 +120,7 @@ define <4 x float> @vuitofp_v4f32_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f32_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vzext.vf2 v9, v8, v0.t ; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t ; CHECK-NEXT: ret @@ -144,7 +144,7 @@ define <4 x float> @vuitofp_v4f32_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f32_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -168,7 +168,7 @@ define <4 x float> @vuitofp_v4f32_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f32_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8, v0.t ; CHECK-NEXT: ret %v = call <4 x float> @llvm.vp.uitofp.v4f32.v4i32(<4 x i32> %va, <4 x i1> %m, i32 %evl) @@ -190,7 +190,7 @@ define <4 x float> @vuitofp_v4f32_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f32_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.f.xu.w v10, v8, v0.t ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -214,7 +214,7 @@ define <4 x double> @vuitofp_v4f64_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f64_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vzext.vf4 v10, v8, v0.t ; CHECK-NEXT: vfwcvt.f.xu.v v8, v10, v0.t ; CHECK-NEXT: ret @@ -238,7 +238,7 @@ define <4 x double> @vuitofp_v4f64_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f64_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vzext.vf2 v10, v8, v0.t ; CHECK-NEXT: vfwcvt.f.xu.v v8, v10, v0.t ; CHECK-NEXT: ret @@ -262,7 +262,7 @@ define <4 x double> @vuitofp_v4f64_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f64_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v10, v8, v0.t ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -286,7 +286,7 @@ define <4 x double> @vuitofp_v4f64_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f64_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8, v0.t ; CHECK-NEXT: ret %v = call <4 x double> @llvm.vp.uitofp.v4f64.v4i64(<4 x i64> %va, <4 x i1> %m, i32 %evl) @@ -317,14 +317,14 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a1, a2 ; CHECK-NEXT: .LBB25_2: -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma 
; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: vfcvt.f.xu.v v16, v16, v0.t ; CHECK-NEXT: bltu a0, a1, .LBB25_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: .LBB25_4: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8, v0.t ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll @@ -9,7 +9,7 @@ define <8 x i7> @vadd_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v8i7: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <8 x i7> @llvm.vp.add.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl) @@ -21,7 +21,7 @@ define <2 x i8> @vadd_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.vp.add.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl) @@ -43,7 +43,7 @@ define <2 x i8> @vadd_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0 @@ -69,7 +69,7 @@ define <2 x i8> @vadd_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0 @@ -97,7 +97,7 @@ define <4 x i8> @vadd_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.add.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl) @@ -119,7 +119,7 @@ define <4 x i8> @vadd_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0 @@ -131,7 +131,7 @@ define <4 x i8> @vadd_vx_v4i8_commute(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_v4i8_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0 @@ -157,7 +157,7 @@ define <4 x i8> @vadd_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t ; CHECK-NEXT: ret %elt.head = 
insertelement <4 x i8> poison, i8 -1, i32 0 @@ -185,7 +185,7 @@ define <5 x i8> @vadd_vv_v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v5i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <5 x i8> @llvm.vp.add.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl) @@ -207,7 +207,7 @@ define <5 x i8> @vadd_vx_v5i8(<5 x i8> %va, i8 %b, <5 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_v5i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <5 x i8> poison, i8 %b, i32 0 @@ -233,7 +233,7 @@ define <5 x i8> @vadd_vi_v5i8(<5 x i8> %va, <5 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v5i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <5 x i8> poison, i8 -1, i32 0 @@ -261,7 +261,7 @@ define <8 x i8> @vadd_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <8 x i8> @llvm.vp.add.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl) @@ -283,7 +283,7 @@ define <8 x i8> @vadd_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 @@ -309,7 +309,7 @@ define <8 x i8> @vadd_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0 @@ -337,7 +337,7 @@ define <16 x i8> @vadd_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <16 x i8> @llvm.vp.add.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl) @@ -359,7 +359,7 @@ define <16 x i8> @vadd_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0 @@ -385,7 +385,7 @@ define <16 x i8> @vadd_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0 @@ -423,14 +423,14 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a0, a3 ; CHECK-NEXT: .LBB32_2: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; 
CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v25 ; CHECK-NEXT: vadd.vi v16, v16, -1, v0.t ; CHECK-NEXT: bltu a1, a2, .LBB32_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: li a1, 128 ; CHECK-NEXT: .LBB32_4: -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t ; CHECK-NEXT: ret @@ -473,10 +473,10 @@ ; CHECK-LABEL: vadd_vi_v258i8_evl129: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 128 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vlm.v v24, (a0) ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t -; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vadd.vi v16, v16, -1, v0.t ; CHECK-NEXT: ret @@ -492,7 +492,7 @@ ; CHECK-LABEL: vadd_vi_v258i8_evl128: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 128 -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0 @@ -506,7 +506,7 @@ define <2 x i16> @vadd_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.vp.add.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl) @@ -528,7 +528,7 @@ define <2 x i16> @vadd_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0 @@ -554,7 +554,7 @@ define <2 x i16> @vadd_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0 @@ -582,7 +582,7 @@ define <4 x i16> @vadd_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.vp.add.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl) @@ -604,7 +604,7 @@ define <4 x i16> @vadd_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0 @@ -630,7 +630,7 @@ define <4 x i16> @vadd_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0 @@ -658,7 +658,7 @@ define <8 x i16> @vadd_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, 
i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <8 x i16> @llvm.vp.add.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl) @@ -680,7 +680,7 @@ define <8 x i16> @vadd_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0 @@ -706,7 +706,7 @@ define <8 x i16> @vadd_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0 @@ -734,7 +734,7 @@ define <16 x i16> @vadd_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call <16 x i16> @llvm.vp.add.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl) @@ -756,7 +756,7 @@ define <16 x i16> @vadd_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0 @@ -782,7 +782,7 @@ define <16 x i16> @vadd_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0 @@ -810,7 +810,7 @@ define <2 x i32> @vadd_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x i32> @llvm.vp.add.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl) @@ -832,7 +832,7 @@ define <2 x i32> @vadd_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0 @@ -858,7 +858,7 @@ define <2 x i32> @vadd_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0 @@ -886,7 +886,7 @@ define <4 x i32> @vadd_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, 
ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl) @@ -908,7 +908,7 @@ define <4 x i32> @vadd_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0 @@ -934,7 +934,7 @@ define <4 x i32> @vadd_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0 @@ -962,7 +962,7 @@ define <8 x i32> @vadd_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl) @@ -984,7 +984,7 @@ define <8 x i32> @vadd_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 @@ -1010,7 +1010,7 @@ define <8 x i32> @vadd_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0 @@ -1038,7 +1038,7 @@ define <16 x i32> @vadd_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call <16 x i32> @llvm.vp.add.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl) @@ -1060,7 +1060,7 @@ define <16 x i32> @vadd_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0 @@ -1086,7 +1086,7 @@ define <16 x i32> @vadd_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0 @@ -1114,7 +1114,7 @@ define <2 x i64> @vadd_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t ; 
CHECK-NEXT: ret %v = call <2 x i64> @llvm.vp.add.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl) @@ -1143,14 +1143,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vadd.vv v8, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vadd.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0 @@ -1190,7 +1190,7 @@ define <2 x i64> @vadd_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0 @@ -1218,7 +1218,7 @@ define <4 x i64> @vadd_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.vp.add.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl) @@ -1247,14 +1247,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vadd.vv v8, v8, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vadd.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0 @@ -1294,7 +1294,7 @@ define <4 x i64> @vadd_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0 @@ -1322,7 +1322,7 @@ define <8 x i64> @vadd_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call <8 x i64> @llvm.vp.add.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl) @@ -1351,14 +1351,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vadd.vv v8, v8, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vadd.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 @@ -1398,7 +1398,7 @@ define <8 x i64> @vadd_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli 
zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0 @@ -1426,7 +1426,7 @@ define <16 x i64> @vadd_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v16i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call <16 x i64> @llvm.vp.add.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl) @@ -1455,14 +1455,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vadd.vv v8, v8, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_v16i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vadd.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0 @@ -1502,7 +1502,7 @@ define <16 x i64> @vadd_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v16i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0 @@ -1544,14 +1544,14 @@ ; RV32-NEXT: # %bb.1: ; RV32-NEXT: mv a1, a2 ; RV32-NEXT: .LBB108_2: -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: li a1, 16 ; RV32-NEXT: vadd.vv v16, v16, v24, v0.t ; RV32-NEXT: bltu a0, a1, .LBB108_4 ; RV32-NEXT: # %bb.3: ; RV32-NEXT: li a0, 16 ; RV32-NEXT: .LBB108_4: -; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vmv1r.v v0, v1 ; RV32-NEXT: vadd.vv v8, v8, v24, v0.t ; RV32-NEXT: ret @@ -1567,14 +1567,14 @@ ; RV64-NEXT: # %bb.1: ; RV64-NEXT: mv a1, a2 ; RV64-NEXT: .LBB108_2: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: li a1, 16 ; RV64-NEXT: vadd.vi v16, v16, -1, v0.t ; RV64-NEXT: bltu a0, a1, .LBB108_4 ; RV64-NEXT: # %bb.3: ; RV64-NEXT: li a0, 16 ; RV64-NEXT: .LBB108_4: -; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV64-NEXT: vmv1r.v v0, v24 ; RV64-NEXT: vadd.vi v8, v8, -1, v0.t ; RV64-NEXT: ret @@ -1641,13 +1641,13 @@ ; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; RV32-NEXT: vmv.v.i v16, -1 -; RV32-NEXT: vsetivli zero, 12, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 12, e64, m8, ta, ma ; RV32-NEXT: vadd.vv v8, v8, v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_v32i64_evl12: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 12, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 12, e64, m8, ta, ma ; RV64-NEXT: vadd.vi v8, v8, -1, v0.t ; RV64-NEXT: ret %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0 @@ -1664,9 +1664,9 @@ ; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; RV32-NEXT: vmv.v.i v24, -1 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vadd.vv v8, v8, v24, v0.t -; RV32-NEXT: vsetivli zero, 11, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 11, e64, m8, ta, ma ; RV32-NEXT: vmv1r.v 
v0, v1 ; RV32-NEXT: vadd.vv v16, v16, v24, v0.t ; RV32-NEXT: ret @@ -1675,9 +1675,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64-NEXT: vslidedown.vi v24, v0, 2 -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vadd.vi v8, v8, -1, v0.t -; RV64-NEXT: vsetivli zero, 11, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 11, e64, m8, ta, ma ; RV64-NEXT: vmv1r.v v0, v24 ; RV64-NEXT: vadd.vi v16, v16, -1, v0.t ; RV64-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll @@ -9,7 +9,7 @@ define <8 x i7> @vand_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v8i7: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <8 x i7> @llvm.vp.and.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl) @@ -21,7 +21,7 @@ define <2 x i8> @vand_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.vp.and.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl) @@ -43,7 +43,7 @@ define <2 x i8> @vand_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0 @@ -55,7 +55,7 @@ define <2 x i8> @vand_vx_v2i8_commute(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_v2i8_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0 @@ -95,7 +95,7 @@ define <2 x i8> @vand_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 4, i32 0 @@ -123,7 +123,7 @@ define <4 x i8> @vand_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.and.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl) @@ -145,7 +145,7 @@ define <4 x i8> @vand_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0 @@ -171,7 +171,7 @@ define <4 x i8> @vand_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli 
zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 4, i32 0 @@ -199,7 +199,7 @@ define <8 x i8> @vand_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <8 x i8> @llvm.vp.and.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl) @@ -221,7 +221,7 @@ define <8 x i8> @vand_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 @@ -247,7 +247,7 @@ define <8 x i8> @vand_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 4, i32 0 @@ -275,7 +275,7 @@ define <16 x i8> @vand_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <16 x i8> @llvm.vp.and.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl) @@ -297,7 +297,7 @@ define <16 x i8> @vand_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0 @@ -323,7 +323,7 @@ define <16 x i8> @vand_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 4, i32 0 @@ -351,7 +351,7 @@ define <2 x i16> @vand_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.vp.and.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl) @@ -373,7 +373,7 @@ define <2 x i16> @vand_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0 @@ -399,7 +399,7 @@ define <2 x i16> @vand_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> 
poison, i16 4, i32 0 @@ -427,7 +427,7 @@ define <4 x i16> @vand_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.vp.and.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl) @@ -449,7 +449,7 @@ define <4 x i16> @vand_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0 @@ -475,7 +475,7 @@ define <4 x i16> @vand_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 4, i32 0 @@ -503,7 +503,7 @@ define <8 x i16> @vand_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <8 x i16> @llvm.vp.and.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl) @@ -525,7 +525,7 @@ define <8 x i16> @vand_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0 @@ -551,7 +551,7 @@ define <8 x i16> @vand_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 4, i32 0 @@ -579,7 +579,7 @@ define <16 x i16> @vand_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call <16 x i16> @llvm.vp.and.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl) @@ -601,7 +601,7 @@ define <16 x i16> @vand_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0 @@ -627,7 +627,7 @@ define <16 x i16> @vand_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 4, i32 0 @@ -655,7 +655,7 @@ define <2 x i32> @vand_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, 
i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x i32> @llvm.vp.and.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl) @@ -677,7 +677,7 @@ define <2 x i32> @vand_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0 @@ -703,7 +703,7 @@ define <2 x i32> @vand_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 4, i32 0 @@ -731,7 +731,7 @@ define <4 x i32> @vand_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.vp.and.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl) @@ -753,7 +753,7 @@ define <4 x i32> @vand_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0 @@ -779,7 +779,7 @@ define <4 x i32> @vand_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 4, i32 0 @@ -807,7 +807,7 @@ define <8 x i32> @vand_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call <8 x i32> @llvm.vp.and.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl) @@ -829,7 +829,7 @@ define <8 x i32> @vand_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 @@ -855,7 +855,7 @@ define <8 x i32> @vand_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 4, i32 0 @@ -883,7 +883,7 @@ define <16 x i32> @vand_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: 
vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call <16 x i32> @llvm.vp.and.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl) @@ -905,7 +905,7 @@ define <16 x i32> @vand_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0 @@ -931,7 +931,7 @@ define <16 x i32> @vand_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 4, i32 0 @@ -959,7 +959,7 @@ define <2 x i64> @vand_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x i64> @llvm.vp.and.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl) @@ -988,14 +988,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vand.vv v8, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vand.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0 @@ -1035,7 +1035,7 @@ define <2 x i64> @vand_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 4, i32 0 @@ -1063,7 +1063,7 @@ define <4 x i64> @vand_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.vp.and.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl) @@ -1092,14 +1092,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vand.vv v8, v8, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vand.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0 @@ -1139,7 +1139,7 @@ define <4 x i64> @vand_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t ; CHECK-NEXT: 
ret %elt.head = insertelement <4 x i64> poison, i64 4, i32 0 @@ -1167,7 +1167,7 @@ define <8 x i64> @vand_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call <8 x i64> @llvm.vp.and.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl) @@ -1196,14 +1196,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vand.vv v8, v8, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vand.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 @@ -1243,7 +1243,7 @@ define <8 x i64> @vand_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_v8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 4, i32 0 @@ -1271,7 +1271,7 @@ define <11 x i64> @vand_vv_v11i64(<11 x i64> %va, <11 x i64> %b, <11 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v11i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call <11 x i64> @llvm.vp.and.v11i64(<11 x i64> %va, <11 x i64> %b, <11 x i1> %m, i32 %evl) @@ -1303,14 +1303,14 @@ ; RV32-NEXT: vmv.s.x v0, a1 ; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; RV32-NEXT: vmerge.vxm v24, v24, a0, v0 -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmv1r.v v0, v16 ; RV32-NEXT: vand.vv v8, v8, v24, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_v11i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vand.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement <11 x i64> poison, i64 %b, i32 0 @@ -1351,7 +1351,7 @@ define <11 x i64> @vand_vi_v11i64(<11 x i64> %va, <11 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_v11i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <11 x i64> poison, i64 4, i32 0 @@ -1379,7 +1379,7 @@ define <16 x i64> @vand_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v16i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call <16 x i64> @llvm.vp.and.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl) @@ -1408,14 +1408,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vand.vv v8, v8, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_v16i64: ; 
RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vand.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0 @@ -1455,7 +1455,7 @@ define <16 x i64> @vand_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_v16i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 4, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll @@ -9,7 +9,7 @@ define <2 x half> @vfsgnj_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x half> @llvm.vp.copysign.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl) @@ -33,7 +33,7 @@ define <4 x half> @vfsgnj_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <4 x half> @llvm.vp.copysign.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl) @@ -57,7 +57,7 @@ define <8 x half> @vfsgnj_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <8 x half> @llvm.vp.copysign.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl) @@ -81,7 +81,7 @@ define <16 x half> @vfsgnj_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call <16 x half> @llvm.vp.copysign.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl) @@ -105,7 +105,7 @@ define <2 x float> @vfsgnj_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x float> @llvm.vp.copysign.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl) @@ -129,7 +129,7 @@ define <4 x float> @vfsgnj_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <4 x float> @llvm.vp.copysign.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl) @@ -153,7 +153,7 @@ define <8 x float> @vfsgnj_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) 
{ ; CHECK-LABEL: vfsgnj_vv_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call <8 x float> @llvm.vp.copysign.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl) @@ -177,7 +177,7 @@ define <16 x float> @vfsgnj_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call <16 x float> @llvm.vp.copysign.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl) @@ -201,7 +201,7 @@ define <2 x double> @vfsgnj_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x double> @llvm.vp.copysign.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl) @@ -225,7 +225,7 @@ define <4 x double> @vfsgnj_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call <4 x double> @llvm.vp.copysign.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl) @@ -249,7 +249,7 @@ define <8 x double> @vfsgnj_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call <8 x double> @llvm.vp.copysign.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl) @@ -273,7 +273,7 @@ define <15 x double> @vfsgnj_vv_v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v15f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call <15 x double> @llvm.vp.copysign.v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 %evl) @@ -297,7 +297,7 @@ define <16 x double> @vfsgnj_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v16f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call <16 x double> @llvm.vp.copysign.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl) @@ -352,7 +352,7 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 @@ -364,7 +364,7 @@ ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: li a2, 16 ; CHECK-NEXT: .LBB26_4: -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: csrr a0, vlenb ; 
CHECK-NEXT: slli a0, a0, 4 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll @@ -14,7 +14,7 @@ ; CHECK-NEXT: vsra.vi v9, v9, 1 ; CHECK-NEXT: vadd.vv v8, v8, v8 ; CHECK-NEXT: vsra.vi v8, v8, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <8 x i7> @llvm.vp.sdiv.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl) @@ -26,7 +26,7 @@ define <2 x i8> @vdiv_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.vp.sdiv.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl) @@ -48,7 +48,7 @@ define <2 x i8> @vdiv_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0 @@ -76,7 +76,7 @@ define <4 x i8> @vdiv_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.sdiv.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl) @@ -98,7 +98,7 @@ define <4 x i8> @vdiv_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0 @@ -126,7 +126,7 @@ define <6 x i8> @vdiv_vv_v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v6i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <6 x i8> @llvm.vp.sdiv.v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 %evl) @@ -138,7 +138,7 @@ define <8 x i8> @vdiv_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <8 x i8> @llvm.vp.sdiv.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl) @@ -160,7 +160,7 @@ define <8 x i8> @vdiv_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 @@ -188,7 +188,7 @@ define <16 x i8> @vdiv_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, 
ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <16 x i8> @llvm.vp.sdiv.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl) @@ -210,7 +210,7 @@ define <16 x i8> @vdiv_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0 @@ -238,7 +238,7 @@ define <2 x i16> @vdiv_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.vp.sdiv.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl) @@ -260,7 +260,7 @@ define <2 x i16> @vdiv_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0 @@ -288,7 +288,7 @@ define <4 x i16> @vdiv_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.vp.sdiv.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl) @@ -310,7 +310,7 @@ define <4 x i16> @vdiv_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0 @@ -338,7 +338,7 @@ define <8 x i16> @vdiv_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <8 x i16> @llvm.vp.sdiv.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl) @@ -360,7 +360,7 @@ define <8 x i16> @vdiv_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0 @@ -388,7 +388,7 @@ define <16 x i16> @vdiv_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call <16 x i16> @llvm.vp.sdiv.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl) @@ -410,7 +410,7 @@ define <16 x i16> @vdiv_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; 
CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0 @@ -438,7 +438,7 @@ define <2 x i32> @vdiv_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x i32> @llvm.vp.sdiv.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl) @@ -460,7 +460,7 @@ define <2 x i32> @vdiv_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0 @@ -488,7 +488,7 @@ define <4 x i32> @vdiv_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.vp.sdiv.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl) @@ -510,7 +510,7 @@ define <4 x i32> @vdiv_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0 @@ -538,7 +538,7 @@ define <8 x i32> @vdiv_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call <8 x i32> @llvm.vp.sdiv.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl) @@ -560,7 +560,7 @@ define <8 x i32> @vdiv_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 @@ -588,7 +588,7 @@ define <16 x i32> @vdiv_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call <16 x i32> @llvm.vp.sdiv.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl) @@ -610,7 +610,7 @@ define <16 x i32> @vdiv_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0 @@ -638,7 +638,7 @@ define <2 x i64> @vdiv_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t ; 
CHECK-NEXT: ret %v = call <2 x i64> @llvm.vp.sdiv.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl) @@ -667,14 +667,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vdiv.vv v8, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vx_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vdiv.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0 @@ -716,7 +716,7 @@ define <4 x i64> @vdiv_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.vp.sdiv.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl) @@ -745,14 +745,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vdiv.vv v8, v8, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vx_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vdiv.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0 @@ -794,7 +794,7 @@ define <8 x i64> @vdiv_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call <8 x i64> @llvm.vp.sdiv.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl) @@ -823,14 +823,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vdiv.vv v8, v8, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vx_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vdiv.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 @@ -872,7 +872,7 @@ define <16 x i64> @vdiv_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v16i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call <16 x i64> @llvm.vp.sdiv.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl) @@ -901,14 +901,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vdiv.vv v8, v8, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vx_v16i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vdiv.vx v8, 
v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll @@ -13,7 +13,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vand.vx v9, v9, a1 ; CHECK-NEXT: vand.vx v8, v8, a1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <8 x i7> @llvm.vp.udiv.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl) @@ -25,7 +25,7 @@ define <2 x i8> @vdivu_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.vp.udiv.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl) @@ -47,7 +47,7 @@ define <2 x i8> @vdivu_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0 @@ -75,7 +75,7 @@ define <4 x i8> @vdivu_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.udiv.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl) @@ -97,7 +97,7 @@ define <4 x i8> @vdivu_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0 @@ -125,7 +125,7 @@ define <6 x i8> @vdivu_vv_v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v6i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <6 x i8> @llvm.vp.udiv.v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 %evl) @@ -137,7 +137,7 @@ define <8 x i8> @vdivu_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <8 x i8> @llvm.vp.udiv.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl) @@ -159,7 +159,7 @@ define <8 x i8> @vdivu_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 @@ -187,7 +187,7 @@ define <16 x i8> @vdivu_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v16i8: 
; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <16 x i8> @llvm.vp.udiv.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl) @@ -209,7 +209,7 @@ define <16 x i8> @vdivu_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0 @@ -237,7 +237,7 @@ define <2 x i16> @vdivu_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.vp.udiv.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl) @@ -259,7 +259,7 @@ define <2 x i16> @vdivu_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0 @@ -287,7 +287,7 @@ define <4 x i16> @vdivu_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.vp.udiv.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl) @@ -309,7 +309,7 @@ define <4 x i16> @vdivu_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0 @@ -337,7 +337,7 @@ define <8 x i16> @vdivu_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <8 x i16> @llvm.vp.udiv.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl) @@ -359,7 +359,7 @@ define <8 x i16> @vdivu_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0 @@ -387,7 +387,7 @@ define <16 x i16> @vdivu_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call <16 x i16> @llvm.vp.udiv.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl) @@ -409,7 +409,7 @@ define <16 x i16> @vdivu_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vdivu_vx_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0 @@ -437,7 +437,7 @@ define <2 x i32> @vdivu_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x i32> @llvm.vp.udiv.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl) @@ -459,7 +459,7 @@ define <2 x i32> @vdivu_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0 @@ -487,7 +487,7 @@ define <4 x i32> @vdivu_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.vp.udiv.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl) @@ -509,7 +509,7 @@ define <4 x i32> @vdivu_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0 @@ -537,7 +537,7 @@ define <8 x i32> @vdivu_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call <8 x i32> @llvm.vp.udiv.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl) @@ -559,7 +559,7 @@ define <8 x i32> @vdivu_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 @@ -587,7 +587,7 @@ define <16 x i32> @vdivu_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call <16 x i32> @llvm.vp.udiv.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl) @@ -609,7 +609,7 @@ define <16 x i32> @vdivu_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0 @@ -637,7 +637,7 @@ define <2 x i64> @vdivu_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v2i64: 
; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x i64> @llvm.vp.udiv.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl) @@ -666,14 +666,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vdivu.vv v8, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vx_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vdivu.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0 @@ -715,7 +715,7 @@ define <4 x i64> @vdivu_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.vp.udiv.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl) @@ -744,14 +744,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vdivu.vv v8, v8, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vx_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vdivu.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0 @@ -793,7 +793,7 @@ define <8 x i64> @vdivu_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call <8 x i64> @llvm.vp.udiv.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl) @@ -822,14 +822,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vdivu.vv v8, v8, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vx_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vdivu.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 @@ -871,7 +871,7 @@ define <16 x i64> @vdivu_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v16i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call <16 x i64> @llvm.vp.udiv.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl) @@ -900,14 +900,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vdivu.vv v8, v8, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 ; 
; RV32-NEXT: ret
;
; RV64-LABEL: vdivu_vx_v16i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vdivu.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll
@@ -9,7 +9,7 @@
define <2 x half> @vfabs_vv_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfabs.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <2 x half> @llvm.vp.fabs.v2f16(<2 x half> %va, <2 x i1> %m, i32 %evl)
@@ -33,7 +33,7 @@
define <4 x half> @vfabs_vv_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfabs.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <4 x half> @llvm.vp.fabs.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
@@ -57,7 +57,7 @@
define <8 x half> @vfabs_vv_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfabs.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <8 x half> @llvm.vp.fabs.v8f16(<8 x half> %va, <8 x i1> %m, i32 %evl)
@@ -81,7 +81,7 @@
define <16 x half> @vfabs_vv_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfabs.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <16 x half> @llvm.vp.fabs.v16f16(<16 x half> %va, <16 x i1> %m, i32 %evl)
@@ -105,7 +105,7 @@
define <2 x float> @vfabs_vv_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfabs.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <2 x float> @llvm.vp.fabs.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
@@ -129,7 +129,7 @@
define <4 x float> @vfabs_vv_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfabs.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <4 x float> @llvm.vp.fabs.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
@@ -153,7 +153,7 @@
define <8 x float> @vfabs_vv_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <8 x float> @llvm.vp.fabs.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
@@ -177,7 +177,7 @@
define <16 x float> @vfabs_vv_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <16 x float> @llvm.vp.fabs.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
@@ -201,7 +201,7 @@
define <2 x double> @vfabs_vv_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfabs.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <2 x double> @llvm.vp.fabs.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
@@ -225,7 +225,7 @@
define <4 x double> @vfabs_vv_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfabs.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <4 x double> @llvm.vp.fabs.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
@@ -249,7 +249,7 @@
define <8 x double> @vfabs_vv_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfabs.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <8 x double> @llvm.vp.fabs.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
@@ -273,7 +273,7 @@
define <15 x double> @vfabs_vv_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v15f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <15 x double> @llvm.vp.fabs.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
@@ -297,7 +297,7 @@
define <16 x double> @vfabs_vv_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <16 x double> @llvm.vp.fabs.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
@@ -330,14 +330,14 @@
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB26_2:
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: vfabs.v v16, v16, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB26_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB26_4:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vfabs.v v8, v8, v0.t
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll
@@ -9,7 +9,7 @@
define <2 x half> @vfadd_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_v2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x half> @llvm.vp.fadd.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 %evl)
@@ -31,7 +31,7 @@
define <2 x half> @vfadd_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_v2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x half> poison, half %b, i32 0
@@ -59,7 +59,7 @@
define <3 x half> @vfadd_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_v3f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <3 x half> @llvm.vp.fadd.v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 %evl)
@@ -71,7 +71,7 @@
define <4 x half> @vfadd_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x half> @llvm.vp.fadd.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 %evl)
@@ -93,7 +93,7 @@
define <4 x half> @vfadd_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x half> poison, half %b, i32 0
@@ -121,7 +121,7 @@
define <8 x half> @vfadd_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x half> @llvm.vp.fadd.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 %evl)
@@ -143,7 +143,7 @@
define <8 x half> @vfadd_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x half> poison, half %b, i32 0
@@ -171,7 +171,7 @@
define <16 x half> @vfadd_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <16 x half> @llvm.vp.fadd.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 %evl)
@@ -193,7 +193,7 @@
define <16 x half> @vfadd_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x half> poison, half %b, i32 0
@@ -221,7 +221,7 @@
define <2 x float> @vfadd_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x float> @llvm.vp.fadd.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl)
@@ -243,7 +243,7 @@
define <2 x float> @vfadd_vf_v2f32(<2 x float> %va, float %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x float> poison, float %b, i32 0
@@ -255,7 +255,7 @@
define <2 x float> @vfadd_vf_v2f32_commute(<2 x float> %va, float %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_v2f32_commute:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x float> poison, float %b, i32 0
@@ -297,7 +297,7 @@
define <4 x float> @vfadd_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x float> @llvm.vp.fadd.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl)
@@ -319,7 +319,7 @@
define <4 x float> @vfadd_vf_v4f32(<4 x float> %va, float %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x float> poison, float %b, i32 0
@@ -347,7 +347,7 @@
define <8 x float> @vfadd_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <8 x float> @llvm.vp.fadd.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl)
@@ -369,7 +369,7 @@
define <8 x float> @vfadd_vf_v8f32(<8 x float> %va, float %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x float> poison, float %b, i32 0
@@ -397,7 +397,7 @@
define <16 x float> @vfadd_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <16 x float> @llvm.vp.fadd.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl)
@@ -419,7 +419,7 @@
define <16 x float> @vfadd_vf_v16f32(<16 x float> %va, float %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x float> poison, float %b, i32 0
@@ -447,7 +447,7 @@
define <2 x double> @vfadd_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x double> @llvm.vp.fadd.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl)
@@ -469,7 +469,7 @@
define <2 x double> @vfadd_vf_v2f64(<2 x double> %va, double %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x double> poison, double %b, i32 0
@@ -497,7 +497,7 @@
define <4 x double> @vfadd_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <4 x double> @llvm.vp.fadd.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl)
@@ -519,7 +519,7 @@
define <4 x double> @vfadd_vf_v4f64(<4 x double> %va, double %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x double> poison, double %b, i32 0
@@ -547,7 +547,7 @@
define <8 x double> @vfadd_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <8 x double> @llvm.vp.fadd.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl)
@@ -569,7 +569,7 @@
define <8 x double> @vfadd_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x double> poison, double %b, i32 0
@@ -597,7 +597,7 @@
define <16 x double> @vfadd_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <16 x double> @llvm.vp.fadd.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl)
@@ -619,7 +619,7 @@
define <16 x double> @vfadd_vf_v16f64(<16 x double> %va, double %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x double> poison, double %b, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll
@@ -9,7 +9,7 @@
define <2 x half> @vfdiv_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_v2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x half> @llvm.vp.fdiv.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 %evl)
@@ -31,7 +31,7 @@
define <2 x half> @vfdiv_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_v2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x half> poison, half %b, i32 0
@@ -59,7 +59,7 @@
define <3 x half> @vfdiv_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_v3f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <3 x half> @llvm.vp.fdiv.v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 %evl)
@@ -71,7 +71,7 @@
define <4 x half> @vfdiv_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x half> @llvm.vp.fdiv.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 %evl)
@@ -93,7 +93,7 @@
define <4 x half> @vfdiv_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x half> poison, half %b, i32 0
@@ -121,7 +121,7 @@
define <8 x half> @vfdiv_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x half> @llvm.vp.fdiv.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 %evl)
@@ -143,7 +143,7 @@
define <8 x half> @vfdiv_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x half> poison, half %b, i32 0
@@ -171,7 +171,7 @@
define <16 x half> @vfdiv_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <16 x half> @llvm.vp.fdiv.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 %evl)
@@ -193,7 +193,7 @@
define <16 x half> @vfdiv_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x half> poison, half %b, i32 0
@@ -221,7 +221,7 @@
define <2 x float> @vfdiv_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x float> @llvm.vp.fdiv.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl)
@@ -243,7 +243,7 @@
define <2 x float> @vfdiv_vf_v2f32(<2 x float> %va, float %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x float> poison, float %b, i32 0
@@ -271,7 +271,7 @@
define <4 x float> @vfdiv_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x float> @llvm.vp.fdiv.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl)
@@ -293,7 +293,7 @@
define <4 x float> @vfdiv_vf_v4f32(<4 x float> %va, float %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x float> poison, float %b, i32 0
@@ -321,7 +321,7 @@
define <8 x float> @vfdiv_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <8 x float> @llvm.vp.fdiv.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl)
@@ -343,7 +343,7 @@
define <8 x float> @vfdiv_vf_v8f32(<8 x float> %va, float %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x float> poison, float %b, i32 0
@@ -371,7 +371,7 @@
define <16 x float> @vfdiv_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <16 x float> @llvm.vp.fdiv.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl)
@@ -393,7 +393,7 @@
define <16 x float> @vfdiv_vf_v16f32(<16 x float> %va, float %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x float> poison, float %b, i32 0
@@ -421,7 +421,7 @@
define <2 x double> @vfdiv_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x double> @llvm.vp.fdiv.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl)
@@ -443,7 +443,7 @@
define <2 x double> @vfdiv_vf_v2f64(<2 x double> %va, double %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x double> poison, double %b, i32 0
@@ -471,7 +471,7 @@
define <4 x double> @vfdiv_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <4 x double> @llvm.vp.fdiv.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl)
@@ -493,7 +493,7 @@
define <4 x double> @vfdiv_vf_v4f64(<4 x double> %va, double %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x double> poison, double %b, i32 0
@@ -521,7 +521,7 @@
define <8 x double> @vfdiv_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <8 x double> @llvm.vp.fdiv.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl)
@@ -543,7 +543,7 @@
define <8 x double> @vfdiv_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x double> poison, double %b, i32 0
@@ -571,7 +571,7 @@
define <16 x double> @vfdiv_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <16 x double> @llvm.vp.fdiv.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl)
@@ -593,7 +593,7 @@
define <16 x double> @vfdiv_vf_v16f64(<16 x double> %va, double %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x double> poison, double %b, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll
@@ -9,7 +9,7 @@
define <2 x half> @vfmax_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x half> @llvm.vp.maxnum.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl)
@@ -33,7 +33,7 @@
define <4 x half> @vfmax_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x half> @llvm.vp.maxnum.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl)
@@ -57,7 +57,7 @@
define <8 x half> @vfmax_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x half> @llvm.vp.maxnum.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl)
@@ -81,7 +81,7 @@
define <16 x half> @vfmax_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <16 x half> @llvm.vp.maxnum.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl)
@@ -105,7 +105,7 @@
define <2 x float> @vfmax_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x float> @llvm.vp.maxnum.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl)
@@ -129,7 +129,7 @@
define <4 x float> @vfmax_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x float> @llvm.vp.maxnum.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl)
@@ -153,7 +153,7 @@
define <8 x float> @vfmax_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <8 x float> @llvm.vp.maxnum.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl)
@@ -177,7 +177,7 @@
define <16 x float> @vfmax_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <16 x float> @llvm.vp.maxnum.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl)
@@ -201,7 +201,7 @@
define <2 x double> @vfmax_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x double> @llvm.vp.maxnum.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl)
@@ -225,7 +225,7 @@
define <4 x double> @vfmax_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <4 x double> @llvm.vp.maxnum.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl)
@@ -249,7 +249,7 @@
define <8 x double> @vfmax_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <8 x double> @llvm.vp.maxnum.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl)
@@ -273,7 +273,7 @@
define <15 x double> @vfmax_vv_v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v15f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <15 x double> @llvm.vp.maxnum.v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 %evl)
@@ -297,7 +297,7 @@
define <16 x double> @vfmax_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <16 x double> @llvm.vp.maxnum.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
@@ -352,7 +352,7 @@
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
@@ -364,7 +364,7 @@
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: .LBB26_4:
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v1
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll
@@ -9,7 +9,7 @@
define <2 x half> @vfmin_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x half> @llvm.vp.minnum.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl)
@@ -33,7 +33,7 @@
define <4 x half> @vfmin_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x half> @llvm.vp.minnum.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl)
@@ -57,7 +57,7 @@
define <8 x half> @vfmin_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x half> @llvm.vp.minnum.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl)
@@ -81,7 +81,7 @@
define <16 x half> @vfmin_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <16 x half> @llvm.vp.minnum.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl)
@@ -105,7 +105,7 @@
define <2 x float> @vfmin_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x float> @llvm.vp.minnum.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl)
@@ -129,7 +129,7 @@
define <4 x float> @vfmin_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x float> @llvm.vp.minnum.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl)
@@ -153,7 +153,7 @@
define <8 x float> @vfmin_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <8 x float> @llvm.vp.minnum.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl)
@@ -177,7 +177,7 @@
define <16 x float> @vfmin_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <16 x float> @llvm.vp.minnum.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl)
@@ -201,7 +201,7 @@
define <2 x double> @vfmin_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x double> @llvm.vp.minnum.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl)
@@ -225,7 +225,7 @@
define <4 x double> @vfmin_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <4 x double> @llvm.vp.minnum.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl)
@@ -249,7 +249,7 @@
define <8 x double> @vfmin_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <8 x double> @llvm.vp.minnum.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl)
@@ -273,7 +273,7 @@
define <15 x double> @vfmin_vv_v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v15f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <15 x double> @llvm.vp.minnum.v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 %evl)
@@ -297,7 +297,7 @@
define <16 x double> @vfmin_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <16 x double> @llvm.vp.minnum.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
@@ -352,7 +352,7 @@
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
@@ -364,7 +364,7 @@
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: .LBB26_4:
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v1
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll
@@ -9,7 +9,7 @@
define <2 x half> @vfmul_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_v2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x half> @llvm.vp.fmul.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 %evl)
@@ -31,7 +31,7 @@
define <2 x half> @vfmul_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_v2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x half> poison, half %b, i32 0
@@ -59,7 +59,7 @@
define <3 x half> @vfmul_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_v3f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <3 x half> @llvm.vp.fmul.v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 %evl)
@@ -71,7 +71,7 @@
define <4 x half> @vfmul_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x half> @llvm.vp.fmul.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 %evl)
@@ -93,7 +93,7 @@
define <4 x half> @vfmul_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x half> poison, half %b, i32 0
@@ -121,7 +121,7 @@
define <8 x half> @vfmul_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x half> @llvm.vp.fmul.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 %evl)
@@ -143,7 +143,7 @@
define <8 x half> @vfmul_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x half> poison, half %b, i32 0
@@ -171,7 +171,7 @@
define <16 x half> @vfmul_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <16 x half> @llvm.vp.fmul.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 %evl)
@@ -193,7 +193,7 @@
define <16 x half> @vfmul_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x half> poison, half %b, i32 0
@@ -221,7 +221,7 @@
define <2 x float> @vfmul_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x float> @llvm.vp.fmul.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl)
@@ -243,7 +243,7 @@
define <2 x float> @vfmul_vf_v2f32(<2 x float> %va, float %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x float> poison, float %b, i32 0
@@ -271,7 +271,7 @@
define <4 x float> @vfmul_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x float> @llvm.vp.fmul.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl)
@@ -293,7 +293,7 @@
define <4 x float> @vfmul_vf_v4f32(<4 x float> %va, float %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x float> poison, float %b, i32 0
@@ -321,7 +321,7 @@
define <8 x float> @vfmul_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <8 x float> @llvm.vp.fmul.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl)
@@ -343,7 +343,7 @@
define <8 x float> @vfmul_vf_v8f32(<8 x float> %va, float %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x float> poison, float %b, i32 0
@@ -371,7 +371,7 @@
define <16 x float> @vfmul_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <16 x float> @llvm.vp.fmul.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl)
@@ -393,7 +393,7 @@
define <16 x float> @vfmul_vf_v16f32(<16 x float> %va, float %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x float> poison, float %b, i32 0
@@ -421,7 +421,7 @@
define <2 x double> @vfmul_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x double> @llvm.vp.fmul.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl)
@@ -443,7 +443,7 @@
define <2 x double> @vfmul_vf_v2f64(<2 x double> %va, double %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x double> poison, double %b, i32 0
@@ -471,7 +471,7 @@
define <4 x double> @vfmul_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <4 x double> @llvm.vp.fmul.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl)
@@ -493,7 +493,7 @@
define <4 x double> @vfmul_vf_v4f64(<4 x double> %va, double %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x double> poison, double %b, i32 0
@@ -521,7 +521,7 @@
define <8 x double> @vfmul_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <8 x double> @llvm.vp.fmul.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl)
@@ -543,7 +543,7 @@
define <8 x double> @vfmul_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x double> poison, double %b, i32 0
@@ -571,7 +571,7 @@
define <16 x double> @vfmul_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <16 x double> @llvm.vp.fmul.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl)
@@ -593,7 +593,7 @@
define <16 x double> @vfmul_vf_v16f64(<16 x double> %va, double %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x double> poison, double %b, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll
@@ -9,7 +9,7 @@
define <2 x half> @vfneg_vv_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfneg.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %va, <2 x i1> %m, i32 %evl)
@@ -33,7 +33,7 @@
define <4 x half> @vfneg_vv_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfneg.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
@@ -57,7 +57,7 @@
define <8 x half> @vfneg_vv_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfneg.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %va, <8 x i1> %m, i32 %evl)
@@ -81,7 +81,7 @@
define <16 x half> @vfneg_vv_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfneg.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %va, <16 x i1> %m, i32 %evl)
@@ -105,7 +105,7 @@
define <2 x float> @vfneg_vv_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfneg.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
@@ -129,7 +129,7 @@
define <4 x float> @vfneg_vv_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfneg.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
@@ -153,7 +153,7 @@
define <8 x float> @vfneg_vv_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfneg.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
@@ -177,7 +177,7 @@
define <16 x float> @vfneg_vv_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfneg.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
@@ -201,7 +201,7 @@
define <2 x double> @vfneg_vv_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfneg.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
@@ -225,7 +225,7 @@
define <4 x double> @vfneg_vv_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfneg.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
@@ -249,7 +249,7 @@
define <8 x double> @vfneg_vv_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfneg.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
@@ -273,7 +273,7 @@
define <15 x double> @vfneg_vv_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v15f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfneg.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <15 x double> @llvm.vp.fneg.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
@@ -297,7 +297,7 @@
define <16 x double> @vfneg_vv_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfneg.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <16 x double> @llvm.vp.fneg.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
@@ -330,14 +330,14 @@
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB26_2:
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: vfneg.v v16, v16, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB26_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB26_4:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vfneg.v v8, v8, v0.t
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrdiv-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrdiv-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrdiv-vp.ll
@@ -9,7 +9,7 @@
define <2 x half> @vfrdiv_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_v2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x half> poison, half %b, i32 0
@@ -37,7 +37,7 @@
define <4 x half> @vfrdiv_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x half> poison, half %b, i32 0
@@ -65,7 +65,7 @@
define <8 x half> @vfrdiv_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x half> poison, half %b, i32 0
@@ -93,7 +93,7 @@
define <16 x half> @vfrdiv_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x half> poison, half %b, i32 0
@@ -121,7 +121,7 @@
define <2 x float> @vfrdiv_vf_v2f32(<2 x float> %va, float %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x float> poison, float %b, i32 0
@@ -149,7 +149,7 @@
define <4 x float> @vfrdiv_vf_v4f32(<4 x float> %va, float %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x float> poison, float %b, i32 0
@@ -177,7 +177,7 @@
define <8 x float> @vfrdiv_vf_v8f32(<8 x float> %va, float %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x float> poison, float %b, i32 0
@@ -205,7 +205,7 @@
define <16 x float> @vfrdiv_vf_v16f32(<16 x float> %va, float %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x float> poison, float %b, i32 0
@@ -233,7 +233,7 @@
define <2 x double> @vfrdiv_vf_v2f64(<2 x double> %va, double %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x double> poison, double %b, i32 0
@@ -261,7 +261,7 @@
define <4 x double> @vfrdiv_vf_v4f64(<4 x double> %va, double %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x double> poison, double %b, i32 0 @@ -289,7 +289,7 @@ define <8 x double> @vfrdiv_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x double> poison, double %b, i32 0 @@ -317,7 +317,7 @@ define <16 x double> @vfrdiv_vf_v16f64(<16 x double> %va, double %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v16f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x double> poison, double %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrsub-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrsub-vp.ll @@ -9,7 +9,7 @@ define <2 x half> @vfrsub_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x half> poison, half %b, i32 0 @@ -37,7 +37,7 @@ define <4 x half> @vfrsub_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x half> poison, half %b, i32 0 @@ -65,7 +65,7 @@ define <8 x half> @vfrsub_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x half> poison, half %b, i32 0 @@ -93,7 +93,7 @@ define <16 x half> @vfrsub_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x half> poison, half %b, i32 0 @@ -121,7 +121,7 @@ define <2 x float> @vfrsub_vf_v2f32(<2 x float> %va, float %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x float> poison, float %b, i32 0 @@ -149,7 +149,7 @@ define <4 x float> @vfrsub_vf_v4f32(<4 x float> %va, float %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x float> poison, float %b, i32 0 
@@ -177,7 +177,7 @@ define <8 x float> @vfrsub_vf_v8f32(<8 x float> %va, float %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x float> poison, float %b, i32 0 @@ -205,7 +205,7 @@ define <16 x float> @vfrsub_vf_v16f32(<16 x float> %va, float %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x float> poison, float %b, i32 0 @@ -233,7 +233,7 @@ define <2 x double> @vfrsub_vf_v2f64(<2 x double> %va, double %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x double> poison, double %b, i32 0 @@ -261,7 +261,7 @@ define <4 x double> @vfrsub_vf_v4f64(<4 x double> %va, double %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x double> poison, double %b, i32 0 @@ -289,7 +289,7 @@ define <8 x double> @vfrsub_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x double> poison, double %b, i32 0 @@ -317,7 +317,7 @@ define <16 x double> @vfrsub_vf_v16f64(<16 x double> %va, double %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v16f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x double> poison, double %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll @@ -9,7 +9,7 @@ define <2 x half> @vfsqrt_vv_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8, v0.t ; CHECK-NEXT: ret %v = call <2 x half> @llvm.vp.sqrt.v2f16(<2 x half> %va, <2 x i1> %m, i32 %evl) @@ -33,7 +33,7 @@ define <4 x half> @vfsqrt_vv_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8, v0.t ; CHECK-NEXT: ret %v = call <4 x half> @llvm.vp.sqrt.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl) @@ -57,7 +57,7 @@ define <8 x half> @vfsqrt_vv_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, 
m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8, v0.t ; CHECK-NEXT: ret %v = call <8 x half> @llvm.vp.sqrt.v8f16(<8 x half> %va, <8 x i1> %m, i32 %evl) @@ -81,7 +81,7 @@ define <16 x half> @vfsqrt_vv_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8, v0.t ; CHECK-NEXT: ret %v = call <16 x half> @llvm.vp.sqrt.v16f16(<16 x half> %va, <16 x i1> %m, i32 %evl) @@ -105,7 +105,7 @@ define <2 x float> @vfsqrt_vv_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8, v0.t ; CHECK-NEXT: ret %v = call <2 x float> @llvm.vp.sqrt.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl) @@ -129,7 +129,7 @@ define <4 x float> @vfsqrt_vv_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8, v0.t ; CHECK-NEXT: ret %v = call <4 x float> @llvm.vp.sqrt.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl) @@ -153,7 +153,7 @@ define <8 x float> @vfsqrt_vv_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8, v0.t ; CHECK-NEXT: ret %v = call <8 x float> @llvm.vp.sqrt.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl) @@ -177,7 +177,7 @@ define <16 x float> @vfsqrt_vv_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8, v0.t ; CHECK-NEXT: ret %v = call <16 x float> @llvm.vp.sqrt.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl) @@ -201,7 +201,7 @@ define <2 x double> @vfsqrt_vv_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8, v0.t ; CHECK-NEXT: ret %v = call <2 x double> @llvm.vp.sqrt.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl) @@ -225,7 +225,7 @@ define <4 x double> @vfsqrt_vv_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8, v0.t ; CHECK-NEXT: ret %v = call <4 x double> @llvm.vp.sqrt.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl) @@ -249,7 +249,7 @@ define <8 x double> @vfsqrt_vv_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8, v0.t ; CHECK-NEXT: ret %v = call <8 x double> @llvm.vp.sqrt.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl) @@ -273,7 +273,7 @@ define <15 x double> @vfsqrt_vv_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v15f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; 
CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8, v0.t ; CHECK-NEXT: ret %v = call <15 x double> @llvm.vp.sqrt.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl) @@ -297,7 +297,7 @@ define <16 x double> @vfsqrt_vv_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v16f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8, v0.t ; CHECK-NEXT: ret %v = call <16 x double> @llvm.vp.sqrt.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl) @@ -330,14 +330,14 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a1, a2 ; CHECK-NEXT: .LBB26_2: -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: vfsqrt.v v16, v16, v0.t ; CHECK-NEXT: bltu a0, a1, .LBB26_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: .LBB26_4: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfsqrt.v v8, v8, v0.t ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll @@ -9,7 +9,7 @@ define <2 x half> @vfsub_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x half> @llvm.vp.fsub.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 %evl) @@ -31,7 +31,7 @@ define <2 x half> @vfsub_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x half> poison, half %b, i32 0 @@ -59,7 +59,7 @@ define <3 x half> @vfsub_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v3f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <3 x half> @llvm.vp.fsub.v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 %evl) @@ -71,7 +71,7 @@ define <4 x half> @vfsub_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <4 x half> @llvm.vp.fsub.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 %evl) @@ -93,7 +93,7 @@ define <4 x half> @vfsub_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x half> poison, half %b, i32 0 @@ -121,7 +121,7 @@ define <8 x half> @vfsub_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v8f16: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <8 x half> @llvm.vp.fsub.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 %evl) @@ -143,7 +143,7 @@ define <8 x half> @vfsub_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x half> poison, half %b, i32 0 @@ -171,7 +171,7 @@ define <16 x half> @vfsub_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call <16 x half> @llvm.vp.fsub.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 %evl) @@ -193,7 +193,7 @@ define <16 x half> @vfsub_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_v16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x half> poison, half %b, i32 0 @@ -221,7 +221,7 @@ define <2 x float> @vfsub_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x float> @llvm.vp.fsub.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl) @@ -243,7 +243,7 @@ define <2 x float> @vfsub_vf_v2f32(<2 x float> %va, float %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x float> poison, float %b, i32 0 @@ -271,7 +271,7 @@ define <4 x float> @vfsub_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <4 x float> @llvm.vp.fsub.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl) @@ -293,7 +293,7 @@ define <4 x float> @vfsub_vf_v4f32(<4 x float> %va, float %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x float> poison, float %b, i32 0 @@ -321,7 +321,7 @@ define <8 x float> @vfsub_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call <8 x float> @llvm.vp.fsub.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl) @@ -343,7 +343,7 @@ define <8 x float> @vfsub_vf_v8f32(<8 x float> %va, 
float %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x float> poison, float %b, i32 0 @@ -371,7 +371,7 @@ define <16 x float> @vfsub_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call <16 x float> @llvm.vp.fsub.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl) @@ -393,7 +393,7 @@ define <16 x float> @vfsub_vf_v16f32(<16 x float> %va, float %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x float> poison, float %b, i32 0 @@ -421,7 +421,7 @@ define <2 x double> @vfsub_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x double> @llvm.vp.fsub.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl) @@ -443,7 +443,7 @@ define <2 x double> @vfsub_vf_v2f64(<2 x double> %va, double %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x double> poison, double %b, i32 0 @@ -471,7 +471,7 @@ define <4 x double> @vfsub_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call <4 x double> @llvm.vp.fsub.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl) @@ -493,7 +493,7 @@ define <4 x double> @vfsub_vf_v4f64(<4 x double> %va, double %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x double> poison, double %b, i32 0 @@ -521,7 +521,7 @@ define <8 x double> @vfsub_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call <8 x double> @llvm.vp.fsub.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl) @@ -543,7 +543,7 @@ define <8 x double> @vfsub_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement 
<8 x double> poison, double %b, i32 0 @@ -571,7 +571,7 @@ define <16 x double> @vfsub_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v16f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call <16 x double> @llvm.vp.fsub.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl) @@ -593,7 +593,7 @@ define <16 x double> @vfsub_vf_v16f64(<16 x double> %va, double %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_v16f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x double> poison, double %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll @@ -9,7 +9,7 @@ define <8 x i7> @vmul_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v8i7: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <8 x i7> @llvm.vp.mul.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl) @@ -21,7 +21,7 @@ define <2 x i8> @vmul_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.vp.mul.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl) @@ -43,7 +43,7 @@ define <2 x i8> @vmul_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0 @@ -71,7 +71,7 @@ define <4 x i8> @vmul_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.mul.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl) @@ -93,7 +93,7 @@ define <4 x i8> @vmul_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0 @@ -121,7 +121,7 @@ define <8 x i8> @vmul_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <8 x i8> @llvm.vp.mul.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl) @@ -143,7 +143,7 @@ define <8 x i8> @vmul_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_v8i8: 
; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 @@ -171,7 +171,7 @@ define <16 x i8> @vmul_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <16 x i8> @llvm.vp.mul.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl) @@ -193,7 +193,7 @@ define <16 x i8> @vmul_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0 @@ -221,7 +221,7 @@ define <2 x i16> @vmul_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.vp.mul.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl) @@ -243,7 +243,7 @@ define <2 x i16> @vmul_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0 @@ -271,7 +271,7 @@ define <4 x i16> @vmul_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.vp.mul.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl) @@ -293,7 +293,7 @@ define <4 x i16> @vmul_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0 @@ -321,7 +321,7 @@ define <8 x i16> @vmul_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <8 x i16> @llvm.vp.mul.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl) @@ -343,7 +343,7 @@ define <8 x i16> @vmul_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0 @@ -355,7 +355,7 @@ define <8 x i16> @vmul_vx_v8i16_commute(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_v8i16_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, 
m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0 @@ -383,7 +383,7 @@ define <12 x i16> @vmul_vv_v12i16(<12 x i16> %va, <12 x i16> %b, <12 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v12i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call <12 x i16> @llvm.vp.mul.v12i16(<12 x i16> %va, <12 x i16> %b, <12 x i1> %m, i32 %evl) @@ -405,7 +405,7 @@ define <12 x i16> @vmul_vx_v12i16(<12 x i16> %va, i16 %b, <12 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_v12i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <12 x i16> poison, i16 %b, i32 0 @@ -433,7 +433,7 @@ define <16 x i16> @vmul_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call <16 x i16> @llvm.vp.mul.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl) @@ -455,7 +455,7 @@ define <16 x i16> @vmul_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0 @@ -483,7 +483,7 @@ define <2 x i32> @vmul_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x i32> @llvm.vp.mul.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl) @@ -505,7 +505,7 @@ define <2 x i32> @vmul_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0 @@ -533,7 +533,7 @@ define <4 x i32> @vmul_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.vp.mul.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl) @@ -555,7 +555,7 @@ define <4 x i32> @vmul_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0 @@ -583,7 +583,7 @@ define <8 x i32> @vmul_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: 
vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call <8 x i32> @llvm.vp.mul.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl) @@ -605,7 +605,7 @@ define <8 x i32> @vmul_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 @@ -633,7 +633,7 @@ define <16 x i32> @vmul_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call <16 x i32> @llvm.vp.mul.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl) @@ -655,7 +655,7 @@ define <16 x i32> @vmul_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0 @@ -683,7 +683,7 @@ define <2 x i64> @vmul_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x i64> @llvm.vp.mul.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl) @@ -712,14 +712,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmul.vv v8, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vmul_vx_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vmul.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0 @@ -761,7 +761,7 @@ define <4 x i64> @vmul_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.vp.mul.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl) @@ -790,14 +790,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vmul.vv v8, v8, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vmul_vx_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vmul.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0 @@ -839,7 +839,7 @@ define <8 x i64> @vmul_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, 
e64, m4, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call <8 x i64> @llvm.vp.mul.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl) @@ -868,14 +868,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmul.vv v8, v8, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vmul_vx_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vmul.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 @@ -917,7 +917,7 @@ define <16 x i64> @vmul_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v16i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call <16 x i64> @llvm.vp.mul.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl) @@ -946,14 +946,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmul.vv v8, v8, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vmul_vx_v16i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vmul.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll @@ -9,7 +9,7 @@ define <8 x i7> @vor_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v8i7: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <8 x i7> @llvm.vp.or.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl) @@ -21,7 +21,7 @@ define <2 x i8> @vor_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.vp.or.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl) @@ -43,7 +43,7 @@ define <2 x i8> @vor_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0 @@ -69,7 +69,7 @@ define <2 x i8> @vor_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 5, i32 0 @@ -97,7 +97,7 @@ define <4 x i8> @vor_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 
zeroext %evl) { ; CHECK-LABEL: vor_vv_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.or.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl) @@ -119,7 +119,7 @@ define <4 x i8> @vor_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0 @@ -131,7 +131,7 @@ define <4 x i8> @vor_vx_v4i8_commute(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_v4i8_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0 @@ -157,7 +157,7 @@ define <4 x i8> @vor_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 5, i32 0 @@ -185,7 +185,7 @@ define <7 x i8> @vor_vv_v5i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v5i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <7 x i8> @llvm.vp.or.v5i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 %evl) @@ -207,7 +207,7 @@ define <7 x i8> @vor_vx_v5i8(<7 x i8> %va, i8 %b, <7 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_v5i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <7 x i8> poison, i8 %b, i32 0 @@ -233,7 +233,7 @@ define <7 x i8> @vor_vi_v5i8(<7 x i8> %va, <7 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_v5i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <7 x i8> poison, i8 5, i32 0 @@ -261,7 +261,7 @@ define <8 x i8> @vor_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <8 x i8> @llvm.vp.or.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl) @@ -283,7 +283,7 @@ define <8 x i8> @vor_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 @@ -309,7 +309,7 @@ define <8 x i8> @vor_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t ; CHECK-NEXT: ret %elt.head = 
insertelement <8 x i8> poison, i8 5, i32 0 @@ -337,7 +337,7 @@ define <16 x i8> @vor_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <16 x i8> @llvm.vp.or.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl) @@ -359,7 +359,7 @@ define <16 x i8> @vor_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0 @@ -385,7 +385,7 @@ define <16 x i8> @vor_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 5, i32 0 @@ -413,7 +413,7 @@ define <2 x i16> @vor_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.vp.or.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl) @@ -435,7 +435,7 @@ define <2 x i16> @vor_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0 @@ -461,7 +461,7 @@ define <2 x i16> @vor_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 5, i32 0 @@ -489,7 +489,7 @@ define <4 x i16> @vor_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.vp.or.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl) @@ -511,7 +511,7 @@ define <4 x i16> @vor_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0 @@ -537,7 +537,7 @@ define <4 x i16> @vor_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 5, i32 0 @@ -565,7 +565,7 @@ define <8 x i16> @vor_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vor_vv_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <8 x i16> @llvm.vp.or.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl) @@ -587,7 +587,7 @@ define <8 x i16> @vor_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0 @@ -613,7 +613,7 @@ define <8 x i16> @vor_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 5, i32 0 @@ -641,7 +641,7 @@ define <16 x i16> @vor_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call <16 x i16> @llvm.vp.or.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl) @@ -663,7 +663,7 @@ define <16 x i16> @vor_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0 @@ -689,7 +689,7 @@ define <16 x i16> @vor_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 5, i32 0 @@ -717,7 +717,7 @@ define <2 x i32> @vor_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x i32> @llvm.vp.or.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl) @@ -739,7 +739,7 @@ define <2 x i32> @vor_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0 @@ -765,7 +765,7 @@ define <2 x i32> @vor_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 5, i32 0 @@ -793,7 +793,7 @@ define <4 x i32> @vor_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; 
CHECK-NEXT: vor.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.vp.or.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl) @@ -815,7 +815,7 @@ define <4 x i32> @vor_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0 @@ -841,7 +841,7 @@ define <4 x i32> @vor_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 5, i32 0 @@ -869,7 +869,7 @@ define <8 x i32> @vor_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call <8 x i32> @llvm.vp.or.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl) @@ -891,7 +891,7 @@ define <8 x i32> @vor_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 @@ -917,7 +917,7 @@ define <8 x i32> @vor_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 5, i32 0 @@ -945,7 +945,7 @@ define <16 x i32> @vor_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call <16 x i32> @llvm.vp.or.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl) @@ -967,7 +967,7 @@ define <16 x i32> @vor_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0 @@ -993,7 +993,7 @@ define <16 x i32> @vor_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 5, i32 0 @@ -1021,7 +1021,7 @@ define <2 x i64> @vor_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x i64> @llvm.vp.or.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl) @@ 
@@ -1050,14 +1050,14 @@
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
 ; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT: vor.vv v8, v8, v9, v0.t
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vor_vx_v2i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT: vor.vx v8, v8, a0, v0.t
 ; RV64-NEXT: ret
 %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
@@ -1097,7 +1097,7 @@
 define <2 x i64> @vor_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vi_v2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <2 x i64> poison, i64 5, i32 0
@@ -1125,7 +1125,7 @@
 define <4 x i64> @vor_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vv_v4i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vor.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call <4 x i64> @llvm.vp.or.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
@@ -1154,14 +1154,14 @@
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
 ; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT: vor.vv v8, v8, v10, v0.t
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vor_vx_v4i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT: vor.vx v8, v8, a0, v0.t
 ; RV64-NEXT: ret
 %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
@@ -1201,7 +1201,7 @@
 define <4 x i64> @vor_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vi_v4i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <4 x i64> poison, i64 5, i32 0
@@ -1229,7 +1229,7 @@
 define <8 x i64> @vor_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vv_v8i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vor.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call <8 x i64> @llvm.vp.or.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
@@ -1258,14 +1258,14 @@
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT: vor.vv v8, v8, v12, v0.t
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vor_vx_v8i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT: vor.vx v8, v8, a0, v0.t
 ; RV64-NEXT: ret
 %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
@@ -1305,7 +1305,7 @@
 define <8 x i64> @vor_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vi_v8i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <8 x i64> poison, i64 5, i32 0
@@ -1333,7 +1333,7 @@
 define <16 x i64> @vor_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vv_v16i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call <16 x i64> @llvm.vp.or.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
@@ -1362,14 +1362,14 @@
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
 ; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT: vor.vv v8, v8, v16, v0.t
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vor_vx_v16i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
 ; RV64-NEXT: vor.vx v8, v8, a0, v0.t
 ; RV64-NEXT: ret
 %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
@@ -1409,7 +1409,7 @@
 define <16 x i64> @vor_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vi_v16i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <16 x i64> poison, i64 5, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
@@ -9,14 +9,14 @@
 define <2 x i8> @vpgather_v2i8(<2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_v2i8:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
 ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
 ; RV32-NEXT: vmv1r.v v8, v9
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vpgather_v2i8:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
 ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
 ; RV64-NEXT: vmv1r.v v8, v9
 ; RV64-NEXT: ret
@@ -27,7 +27,7 @@
 define <2 x i16> @vpgather_v2i8_sextload_v2i16(<2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_v2i8_sextload_v2i16:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
 ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
 ; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
 ; RV32-NEXT: vsext.vf2 v8, v9
@@ -35,7 +35,7 @@
 ;
 ; RV64-LABEL: vpgather_v2i8_sextload_v2i16:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
 ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
 ; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
 ; RV64-NEXT: vsext.vf2 v8, v9
@@ -48,7 +48,7 @@
 define <2 x i16> @vpgather_v2i8_zextload_v2i16(<2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_v2i8_zextload_v2i16:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
 ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
 ; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
 ; RV32-NEXT: vzext.vf2 v8, v9
@@ -56,7 +56,7 @@
 ;
 ; RV64-LABEL: vpgather_v2i8_zextload_v2i16:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
 ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
 ; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
 ; RV64-NEXT: vzext.vf2 v8, v9
@@ -69,7 +69,7 @@
 define <2 x i32> @vpgather_v2i8_sextload_v2i32(<2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_v2i8_sextload_v2i32:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
 ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
 ; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; RV32-NEXT: vsext.vf4 v8, v9
@@ -77,7 +77,7 @@
 ;
 ; RV64-LABEL: vpgather_v2i8_sextload_v2i32:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
 ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
 ; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; RV64-NEXT: vsext.vf4 v8, v9
@@ -90,7 +90,7 @@
 define <2 x i32> @vpgather_v2i8_zextload_v2i32(<2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_v2i8_zextload_v2i32:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
 ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
 ; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; RV32-NEXT: vzext.vf4 v8, v9
@@ -98,7 +98,7 @@
 ;
 ; RV64-LABEL: vpgather_v2i8_zextload_v2i32:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
 ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
 ; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; RV64-NEXT: vzext.vf4 v8, v9
@@ -111,7 +111,7 @@
 define <2 x i64> @vpgather_v2i8_sextload_v2i64(<2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_v2i8_sextload_v2i64:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
 ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
 ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
 ; RV32-NEXT: vsext.vf8 v8, v9
@@ -119,7 +119,7 @@
 ;
 ; RV64-LABEL: vpgather_v2i8_sextload_v2i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
 ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
 ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
 ; RV64-NEXT: vsext.vf8 v8, v9
@@ -132,7 +132,7 @@
 define <2 x i64> @vpgather_v2i8_zextload_v2i64(<2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_v2i8_zextload_v2i64:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
 ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
 ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
 ; RV32-NEXT: vzext.vf8 v8, v9
@@ -140,7 +140,7 @@
 ;
 ; RV64-LABEL: vpgather_v2i8_zextload_v2i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
 ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
 ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
 ; RV64-NEXT: vzext.vf8 v8, v9
@@ -155,14 +155,14 @@
 define <3 x i8> @vpgather_v3i8(<3 x i8*> %ptrs, <3 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_v3i8:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
 ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
 ; RV32-NEXT: vmv1r.v v8, v9
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vpgather_v3i8:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
 ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
 ; RV64-NEXT: vmv1r.v v8, v10
 ; RV64-NEXT: ret
@@ -195,14 +195,14 @@
 define <4 x i8> @vpgather_v4i8(<4 x i8*> %ptrs, <4 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_v4i8:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
 ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
 ; RV32-NEXT: vmv1r.v v8, v9
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vpgather_v4i8:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
 ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
 ; RV64-NEXT: vmv1r.v v8, v10
 ; RV64-NEXT: ret
@@ -235,14 +235,14 @@
 define <8 x i8> @vpgather_v8i8(<8 x i8*> %ptrs, <8 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_v8i8:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
 ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t
 ; RV32-NEXT: vmv1r.v v8, v10
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vpgather_v8i8:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
 ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t
 ; RV64-NEXT: vmv1r.v v8, v12
 ; RV64-NEXT: ret
@@ -255,7 +255,7 @@
 ; RV32: # %bb.0:
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vsext.vf4 v10, v8
-; RV32-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v10, v0.t
 ; RV32-NEXT: ret
 ;
@@ -263,7 +263,7 @@
 ; RV64: # %bb.0:
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vsext.vf8 v12, v8
-; RV64-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
 ; RV64-NEXT: ret
 %ptrs = getelementptr inbounds i8, i8* %base, <8 x i8> %idxs
@@ -279,7 +279,7 @@
 ; RV32-NEXT: li a2, 32
 ; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
 ; RV32-NEXT: vsext.vf4 v16, v8
-; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT: ret
 ;
@@ -298,7 +298,7 @@
 ; RV64-NEXT: vsext.vf8 v16, v12
 ; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
 ; RV64-NEXT: vslidedown.vi v0, v10, 2
-; RV64-NEXT: vsetvli zero, a2, e8, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a2, e8, m1, ta, ma
 ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t
 ; RV64-NEXT: li a2, 16
 ; RV64-NEXT: bltu a1, a2, .LBB13_4
@@ -307,7 +307,7 @@
 ; RV64-NEXT: .LBB13_4:
 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
 ; RV64-NEXT: vsext.vf8 v16, v8
-; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
 ; RV64-NEXT: vmv1r.v v0, v10
 ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
 ; RV64-NEXT: li a0, 32
@@ -324,14 +324,14 @@
 define <2 x i16> @vpgather_v2i16(<2 x i16*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_v2i16:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
 ; RV32-NEXT: vmv1r.v v8, v9
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vpgather_v2i16:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
 ; RV64-NEXT: vmv1r.v v8, v9
 ; RV64-NEXT: ret
@@ -342,7 +342,7 @@
 define <2 x i32> @vpgather_v2i16_sextload_v2i32(<2 x i16*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_v2i16_sextload_v2i32:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
 ; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; RV32-NEXT: vsext.vf2 v8, v9
@@ -350,7 +350,7 @@
 ;
 ; RV64-LABEL: vpgather_v2i16_sextload_v2i32:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
 ; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; RV64-NEXT: vsext.vf2 v8, v9
@@ -363,7 +363,7 @@
 define <2 x i32> @vpgather_v2i16_zextload_v2i32(<2 x i16*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_v2i16_zextload_v2i32:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
 ; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; RV32-NEXT: vzext.vf2 v8, v9
@@ -371,7 +371,7 @@
 ;
 ; RV64-LABEL: vpgather_v2i16_zextload_v2i32:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
 ; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; RV64-NEXT: vzext.vf2 v8, v9
@@ -384,7 +384,7 @@
 define <2 x i64> @vpgather_v2i16_sextload_v2i64(<2 x i16*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_v2i16_sextload_v2i64:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
 ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
 ; RV32-NEXT: vsext.vf4 v8, v9
@@ -392,7 +392,7 @@
 ;
 ; RV64-LABEL: vpgather_v2i16_sextload_v2i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
 ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
 ; RV64-NEXT: vsext.vf4 v8, v9
@@ -405,7 +405,7 @@
 define <2 x i64> @vpgather_v2i16_zextload_v2i64(<2 x i16*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_v2i16_zextload_v2i64:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
 ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
 ; RV32-NEXT: vzext.vf4 v8, v9
@@ -413,7 +413,7 @@
 ;
 ; RV64-LABEL: vpgather_v2i16_zextload_v2i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
 ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
 ; RV64-NEXT: vzext.vf4 v8, v9
@@ -428,14 +428,14 @@
 define <4 x i16> @vpgather_v4i16(<4 x i16*> %ptrs, <4 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_v4i16:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
 ; RV32-NEXT: vmv1r.v v8, v9
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vpgather_v4i16:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
 ; RV64-NEXT: vmv1r.v v8, v10
 ; RV64-NEXT: ret
@@ -468,14 +468,14 @@
 define <8 x i16> @vpgather_v8i16(<8 x i16*> %ptrs, <8 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_v8i16:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t
 ; RV32-NEXT: vmv.v.v v8, v10
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vpgather_v8i16:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t
 ; RV64-NEXT: vmv.v.v v8, v12
 ; RV64-NEXT: ret
@@ -489,7 +489,7 @@
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vsext.vf4 v10, v8
 ; RV32-NEXT: vadd.vv v10, v10, v10
-; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v10, v0.t
 ; RV32-NEXT: ret
 ;
@@ -498,7 +498,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vsext.vf8 v12, v8
 ; RV64-NEXT: vadd.vv v12, v12, v12
-; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
 ; RV64-NEXT: ret
 %ptrs = getelementptr inbounds i16, i16* %base, <8 x i8> %idxs
@@ -512,7 +512,7 @@
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vsext.vf4 v10, v8
 ; RV32-NEXT: vadd.vv v10, v10, v10
-; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v10, v0.t
 ; RV32-NEXT: ret
 ;
@@ -521,7 +521,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vsext.vf8 v12, v8
 ; RV64-NEXT: vadd.vv v12, v12, v12
-; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
 ; RV64-NEXT: ret
 %eidxs = sext <8 x i8> %idxs to <8 x i16>
@@ -536,7 +536,7 @@
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vzext.vf4 v10, v8
 ; RV32-NEXT: vadd.vv v10, v10, v10
-; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v10, v0.t
 ; RV32-NEXT: ret
 ;
@@ -545,7 +545,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vzext.vf8 v12, v8
 ; RV64-NEXT: vadd.vv v12, v12, v12
-; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
 ; RV64-NEXT: ret
 %eidxs = zext <8 x i8> %idxs to <8 x i16>
@@ -560,7 +560,7 @@
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vsext.vf2 v10, v8
 ; RV32-NEXT: vadd.vv v10, v10, v10
-; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v10, v0.t
 ; RV32-NEXT: ret
 ;
@@ -569,7 +569,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vsext.vf4 v12, v8
 ; RV64-NEXT: vadd.vv v12, v12, v12
-; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
 ; RV64-NEXT: ret
 %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %idxs
@@ -582,13 +582,13 @@
 define <2 x i32> @vpgather_v2i32(<2 x i32*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_v2i32:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vpgather_v2i32:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
 ; RV64-NEXT: vmv1r.v v8, v9
 ; RV64-NEXT: ret
@@ -599,7 +599,7 @@
 define <2 x i64> @vpgather_v2i32_sextload_v2i64(<2 x i32*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_v2i32_sextload_v2i64:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
 ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
 ; RV32-NEXT: vsext.vf2 v8, v9
@@ -607,7 +607,7 @@
 ;
 ; RV64-LABEL: vpgather_v2i32_sextload_v2i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
 ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
 ; RV64-NEXT: vsext.vf2 v8, v9
@@ -620,7 +620,7 @@
 define <2 x i64> @vpgather_v2i32_zextload_v2i64(<2 x i32*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_v2i32_zextload_v2i64:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
 ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
 ; RV32-NEXT: vzext.vf2 v8, v9
@@ -628,7 +628,7 @@
 ;
 ; RV64-LABEL: vpgather_v2i32_zextload_v2i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
 ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
 ; RV64-NEXT: vzext.vf2 v8, v9
@@ -643,13 +643,13 @@
 define <4 x i32> @vpgather_v4i32(<4 x i32*> %ptrs, <4 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_v4i32:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vpgather_v4i32:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
 ; RV64-NEXT: vmv.v.v v8, v10
 ; RV64-NEXT: ret
@@ -681,13 +681,13 @@
 define <8 x i32> @vpgather_v8i32(<8 x i32*> %ptrs, <8 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_v8i32:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vpgather_v8i32:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t
 ; RV64-NEXT: vmv.v.v v8, v12
 ; RV64-NEXT: ret
@@ -701,7 +701,7 @@
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vsext.vf4 v10, v8
 ; RV32-NEXT: vsll.vi v8, v10, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
 ; RV32-NEXT: ret
 ;
@@ -710,7 +710,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vsext.vf8 v12, v8
 ; RV64-NEXT: vsll.vi v12, v12, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
 ; RV64-NEXT: ret
 %ptrs = getelementptr inbounds i32, i32* %base, <8 x i8> %idxs
@@ -724,7 +724,7 @@
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vsext.vf4 v10, v8
 ; RV32-NEXT: vsll.vi v8, v10, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
 ; RV32-NEXT: ret
 ;
@@ -733,7 +733,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vsext.vf8 v12, v8
 ; RV64-NEXT: vsll.vi v12, v12, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
 ; RV64-NEXT: ret
 %eidxs = sext <8 x i8> %idxs to <8 x i32>
@@ -748,7 +748,7 @@
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vzext.vf4 v10, v8
 ; RV32-NEXT: vsll.vi v8, v10, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
 ; RV32-NEXT: ret
 ;
@@ -757,7 +757,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vzext.vf8 v12, v8
 ; RV64-NEXT: vsll.vi v12, v12, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
 ; RV64-NEXT: ret
 %eidxs = zext <8 x i8> %idxs to <8 x i32>
@@ -772,7 +772,7 @@
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vsext.vf2 v10, v8
 ; RV32-NEXT: vsll.vi v8, v10, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
 ; RV32-NEXT: ret
 ;
@@ -781,7 +781,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vsext.vf4 v12, v8
 ; RV64-NEXT: vsll.vi v12, v12, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
 ; RV64-NEXT: ret
 %ptrs = getelementptr inbounds i32, i32* %base, <8 x i16> %idxs
@@ -795,7 +795,7 @@
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vsext.vf2 v10, v8
 ; RV32-NEXT: vsll.vi v8, v10, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
 ; RV32-NEXT: ret
 ;
@@ -804,7 +804,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vsext.vf4 v12, v8
 ; RV64-NEXT: vsll.vi v12, v12, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
 ; RV64-NEXT: ret
 %eidxs = sext <8 x i16> %idxs to <8 x i32>
@@ -819,7 +819,7 @@
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vzext.vf2 v10, v8
 ; RV32-NEXT: vsll.vi v8, v10, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
 ; RV32-NEXT: ret
 ;
@@ -828,7 +828,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vzext.vf4 v12, v8
 ; RV64-NEXT: vsll.vi v12, v12, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
 ; RV64-NEXT: ret
 %eidxs = zext <8 x i16> %idxs to <8 x i32>
@@ -842,7 +842,7 @@
 ; RV32: # %bb.0:
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vsll.vi v8, v8, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
 ; RV32-NEXT: ret
 ;
@@ -851,7 +851,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vsext.vf2 v12, v8
 ; RV64-NEXT: vsll.vi v12, v12, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
 ; RV64-NEXT: ret
 %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %idxs
@@ -864,14 +864,14 @@
 define <2 x i64> @vpgather_v2i64(<2 x i64*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_v2i64:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
 ; RV32-NEXT: vmv.v.v v8, v9
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vpgather_v2i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t
 ; RV64-NEXT: ret
 %v = call <2 x i64> @llvm.vp.gather.v2i64.v2p0i64(<2 x i64*> %ptrs, <2 x i1> %m, i32 %evl)
@@ -883,14 +883,14 @@
 define <4 x i64> @vpgather_v4i64(<4 x i64*> %ptrs, <4 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_v4i64:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t
 ; RV32-NEXT: vmv.v.v v8, v10
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vpgather_v4i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t
 ; RV64-NEXT: ret
 %v = call <4 x i64> @llvm.vp.gather.v4i64.v4p0i64(<4 x i64*> %ptrs, <4 x i1> %m, i32 %evl)
@@ -921,14 +921,14 @@
 define <8 x i64> @vpgather_v8i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_v8i64:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t
 ; RV32-NEXT: vmv.v.v v8, v12
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vpgather_v8i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t
 ; RV64-NEXT: ret
 %v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl)
@@ -941,7 +941,7 @@
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vsext.vf4 v10, v8
 ; RV32-NEXT: vsll.vi v12, v10, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT: ret
 ;
@@ -950,7 +950,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vsext.vf8 v12, v8
 ; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
 ; RV64-NEXT: ret
 %ptrs = getelementptr inbounds i64, i64* %base, <8 x i8> %idxs
@@ -964,7 +964,7 @@
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vsext.vf4 v10, v8
 ; RV32-NEXT: vsll.vi v12, v10, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT: ret
 ;
@@ -973,7 +973,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vsext.vf8 v12, v8
 ; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
 ; RV64-NEXT: ret
 %eidxs = sext <8 x i8> %idxs to <8 x i64>
@@ -988,7 +988,7 @@
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vzext.vf4 v10, v8
 ; RV32-NEXT: vsll.vi v12, v10, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT: ret
 ;
@@ -997,7 +997,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vzext.vf8 v12, v8
 ; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
 ; RV64-NEXT: ret
 %eidxs = zext <8 x i8> %idxs to <8 x i64>
@@ -1012,7 +1012,7 @@
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vsext.vf2 v10, v8
 ; RV32-NEXT: vsll.vi v12, v10, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT: ret
 ;
@@ -1021,7 +1021,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vsext.vf4 v12, v8
 ; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
 ; RV64-NEXT: ret
 %ptrs = getelementptr inbounds i64, i64* %base, <8 x i16> %idxs
@@ -1035,7 +1035,7 @@
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vsext.vf2 v10, v8
 ; RV32-NEXT: vsll.vi v12, v10, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT: ret
 ;
@@ -1044,7 +1044,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vsext.vf4 v12, v8
 ; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
 ; RV64-NEXT: ret
 %eidxs = sext <8 x i16> %idxs to <8 x i64>
@@ -1059,7 +1059,7 @@
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vzext.vf2 v10, v8
 ; RV32-NEXT: vsll.vi v12, v10, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT: ret
 ;
@@ -1068,7 +1068,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vzext.vf4 v12, v8
 ; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
 ; RV64-NEXT: ret
 %eidxs = zext <8 x i16> %idxs to <8 x i64>
@@ -1082,7 +1082,7 @@
 ; RV32: # %bb.0:
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vsll.vi v12, v8, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT: ret
 ;
@@ -1091,7 +1091,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vsext.vf2 v12, v8
 ; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
 ; RV64-NEXT: ret
 %ptrs = getelementptr inbounds i64, i64* %base, <8 x i32> %idxs
@@ -1104,7 +1104,7 @@
 ; RV32: # %bb.0:
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vsll.vi v12, v8, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT: ret
 ;
@@ -1113,7 +1113,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vsext.vf2 v12, v8
 ; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
 ; RV64-NEXT: ret
 %eidxs = sext <8 x i32> %idxs to <8 x i64>
@@ -1127,7 +1127,7 @@
 ; RV32: # %bb.0:
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vsll.vi v12, v8, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT: ret
 ;
@@ -1136,7 +1136,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vzext.vf2 v12, v8
 ; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
 ; RV64-NEXT: ret
 %eidxs = zext <8 x i32> %idxs to <8 x i64>
@@ -1151,7 +1151,7 @@
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vnsrl.wi v12, v8, 0
 ; RV32-NEXT: vsll.vi v12, v12, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT: ret
 ;
@@ -1159,7 +1159,7 @@
 ; RV64: # %bb.0:
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vsll.vi v8, v8, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
 ; RV64-NEXT: ret
 %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %idxs
@@ -1172,14 +1172,14 @@
 define <2 x half> @vpgather_v2f16(<2 x half*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_v2f16:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
 ; RV32-NEXT: vmv1r.v v8, v9
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vpgather_v2f16:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
 ; RV64-NEXT: vmv1r.v v8, v9
 ; RV64-NEXT: ret
@@ -1192,14 +1192,14 @@
 define <4 x half> @vpgather_v4f16(<4 x half*> %ptrs, <4 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_v4f16:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
 ; RV32-NEXT: vmv1r.v v8, v9
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vpgather_v4f16:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
 ; RV64-NEXT: vmv1r.v v8, v10
 ; RV64-NEXT: ret
@@ -1232,14 +1232,14 @@
 define <8 x half> @vpgather_v8f16(<8 x half*> %ptrs, <8 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_v8f16:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t
 ; RV32-NEXT: vmv.v.v v8, v10
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vpgather_v8f16:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t
 ; RV64-NEXT: vmv.v.v v8, v12
 ; RV64-NEXT: ret
@@ -1253,7 +1253,7 @@
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vsext.vf4 v10, v8
 ; RV32-NEXT: vadd.vv v10, v10, v10
-; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v10, v0.t
 ; RV32-NEXT: ret
 ;
@@ -1262,7 +1262,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vsext.vf8 v12, v8
 ; RV64-NEXT: vadd.vv v12, v12, v12
-; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
 ; RV64-NEXT: ret
 %ptrs = getelementptr inbounds half, half* %base, <8 x i8> %idxs
@@ -1276,7 +1276,7 @@
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vsext.vf4 v10, v8
 ; RV32-NEXT: vadd.vv v10, v10, v10
-; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v10, v0.t
 ; RV32-NEXT: ret
 ;
@@ -1285,7 +1285,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vsext.vf8 v12, v8
 ; RV64-NEXT: vadd.vv v12, v12, v12
-; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
 ; RV64-NEXT: ret
 %eidxs = sext <8 x i8> %idxs to <8 x i16>
@@ -1300,7 +1300,7 @@
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vzext.vf4 v10, v8
 ; RV32-NEXT: vadd.vv v10, v10, v10
-; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v10, v0.t
 ; RV32-NEXT: ret
 ;
@@ -1309,7 +1309,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vzext.vf8 v12, v8
 ; RV64-NEXT: vadd.vv v12, v12, v12
-; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
 ; RV64-NEXT: ret
 %eidxs = zext <8 x i8> %idxs to <8 x i16>
@@ -1324,7 +1324,7 @@
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vsext.vf2 v10, v8
 ; RV32-NEXT: vadd.vv v10, v10, v10
-; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v10, v0.t
 ; RV32-NEXT: ret
 ;
@@ -1333,7 +1333,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vsext.vf4 v12, v8
 ; RV64-NEXT: vadd.vv v12, v12, v12
-; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
 ; RV64-NEXT: ret
 %ptrs = getelementptr inbounds half, half* %base, <8 x i16> %idxs
@@ -1346,13 +1346,13 @@
 define <2 x float> @vpgather_v2f32(<2 x float*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_v2f32:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vpgather_v2f32:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
 ; RV64-NEXT: vmv1r.v v8, v9
 ; RV64-NEXT: ret
@@ -1365,13 +1365,13 @@
 define <4 x float> @vpgather_v4f32(<4 x float*> %ptrs, <4 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_v4f32:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vpgather_v4f32:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
 ; RV64-NEXT: vmv.v.v v8, v10
 ; RV64-NEXT: ret
@@ -1403,13 +1403,13 @@
 define <8 x float> @vpgather_v8f32(<8 x float*> %ptrs, <8 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_v8f32:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vpgather_v8f32:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t
 ; RV64-NEXT: vmv.v.v v8, v12
 ; RV64-NEXT: ret
@@ -1423,7 +1423,7 @@
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vsext.vf4 v10, v8
 ; RV32-NEXT: vsll.vi v8, v10, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
 ; RV32-NEXT: ret
 ;
@@ -1432,7 +1432,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vsext.vf8 v12, v8
 ; RV64-NEXT: vsll.vi v12, v12, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
 ; RV64-NEXT: ret
 %ptrs = getelementptr inbounds float, float* %base, <8 x i8> %idxs
@@ -1446,7 +1446,7 @@
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vsext.vf4 v10, v8
 ; RV32-NEXT: vsll.vi v8, v10, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
 ; RV32-NEXT: ret
 ;
@@ -1455,7 +1455,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vsext.vf8 v12, v8
 ; RV64-NEXT: vsll.vi v12, v12, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
 ; RV64-NEXT: ret
 %eidxs = sext <8 x i8> %idxs to <8 x i32>
@@ -1470,7 +1470,7 @@
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vzext.vf4 v10, v8
 ; RV32-NEXT: vsll.vi v8, v10, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
 ; RV32-NEXT: ret
 ;
@@ -1479,7 +1479,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vzext.vf8 v12, v8
 ; RV64-NEXT: vsll.vi v12, v12, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
 ; RV64-NEXT: ret
 %eidxs = zext <8 x i8> %idxs to <8 x i32>
@@ -1494,7 +1494,7 @@
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vsext.vf2 v10, v8
 ; RV32-NEXT: vsll.vi v8, v10, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
 ; RV32-NEXT: ret
 ;
@@ -1503,7 +1503,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vsext.vf4 v12, v8
 ; RV64-NEXT: vsll.vi v12, v12, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
 ; RV64-NEXT: ret
 %ptrs = getelementptr inbounds float, float* %base, <8 x i16> %idxs
@@ -1517,7 +1517,7 @@
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vsext.vf2 v10, v8
 ; RV32-NEXT: vsll.vi v8, v10, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
 ; RV32-NEXT: ret
 ;
@@ -1526,7 +1526,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vsext.vf4 v12, v8
 ; RV64-NEXT: vsll.vi v12, v12, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
 ; RV64-NEXT: ret
 %eidxs = sext <8 x i16> %idxs to <8 x i32>
@@ -1541,7 +1541,7 @@
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vzext.vf2 v10, v8
 ; RV32-NEXT: vsll.vi v8, v10, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
 ; RV32-NEXT: ret
 ;
@@ -1550,7 +1550,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vzext.vf4 v12, v8
 ; RV64-NEXT: vsll.vi v12, v12, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
 ; RV64-NEXT: ret
 %eidxs = zext <8 x i16> %idxs to <8 x i32>
@@ -1564,7 +1564,7 @@
 ; RV32: # %bb.0:
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vsll.vi v8, v8, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
 ; RV32-NEXT: ret
 ;
@@ -1573,7 +1573,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vsext.vf2 v12, v8
 ; RV64-NEXT: vsll.vi v12, v12, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
 ; RV64-NEXT: ret
 %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %idxs
@@ -1586,14 +1586,14 @@
 define <2 x double> @vpgather_v2f64(<2 x double*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_v2f64:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
 ; RV32-NEXT: vmv.v.v v8, v9
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vpgather_v2f64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t
 ; RV64-NEXT: ret
 %v = call <2 x double> @llvm.vp.gather.v2f64.v2p0f64(<2 x double*> %ptrs, <2 x i1> %m, i32 %evl)
@@ -1605,14 +1605,14 @@
 define <4 x double> @vpgather_v4f64(<4 x double*> %ptrs, <4 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_v4f64:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t
 ; RV32-NEXT: vmv.v.v v8, v10
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vpgather_v4f64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t
 ; RV64-NEXT: ret
 %v = call <4 x double> @llvm.vp.gather.v4f64.v4p0f64(<4 x double*> %ptrs, <4 x i1> %m, i32 %evl)
@@ -1643,14 +1643,14 @@
 define <8 x double> @vpgather_v8f64(<8 x double*> %ptrs, <8 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_v8f64:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t
 ; RV32-NEXT: vmv.v.v v8, v12
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vpgather_v8f64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t
 ; RV64-NEXT: ret
 %v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl)
@@ -1663,7 +1663,7 @@
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vsext.vf4 v10, v8
 ; RV32-NEXT: vsll.vi v12, v10, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT: ret
 ;
@@ -1672,7 +1672,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vsext.vf8 v12, v8
 ; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
 ; RV64-NEXT: ret
 %ptrs = getelementptr inbounds double, double* %base, <8 x i8> %idxs
@@ -1686,7 +1686,7 @@
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vsext.vf4 v10, v8
 ; RV32-NEXT: vsll.vi v12, v10, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT: ret
 ;
@@ -1695,7 +1695,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vsext.vf8 v12, v8
 ; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
 ; RV64-NEXT: ret
 %eidxs = sext <8 x i8> %idxs to <8 x i64>
@@ -1710,7 +1710,7 @@
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vzext.vf4 v10, v8
 ; RV32-NEXT: vsll.vi v12, v10, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT: ret
 ;
@@ -1719,7 +1719,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vzext.vf8 v12, v8
 ; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
 ; RV64-NEXT: ret
 %eidxs = zext <8 x i8> %idxs to <8 x i64>
@@ -1734,7 +1734,7 @@
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vsext.vf2 v10, v8
 ; RV32-NEXT: vsll.vi v12, v10, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT: ret
 ;
@@ -1743,7 +1743,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vsext.vf4 v12, v8
 ; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
 ; RV64-NEXT: ret
 %ptrs = getelementptr inbounds double, double* %base, <8 x i16> %idxs
@@ -1757,7 +1757,7 @@
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vsext.vf2 v10, v8
 ; RV32-NEXT: vsll.vi v12, v10, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT: ret
 ;
@@ -1766,7 +1766,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vsext.vf4 v12, v8
 ; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
 ; RV64-NEXT: ret
 %eidxs = sext <8 x i16> %idxs to <8 x i64>
@@ -1781,7 +1781,7 @@
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vzext.vf2 v10, v8
 ; RV32-NEXT: vsll.vi v12, v10, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT: ret
 ;
@@ -1790,7 +1790,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vzext.vf4 v12, v8
 ; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
 ; RV64-NEXT: ret
 %eidxs = zext <8 x i16> %idxs to <8 x i64>
@@ -1804,7 +1804,7 @@
 ; RV32: # %bb.0:
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vsll.vi v12, v8, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT: ret
 ;
@@ -1813,7 +1813,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vsext.vf2 v12, v8
 ; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
 ; RV64-NEXT: ret
 %ptrs = getelementptr inbounds double, double* %base, <8 x i32> %idxs
@@ -1826,7 +1826,7 @@
 ; RV32: # %bb.0:
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vsll.vi v12, v8, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT: ret
 ;
@@ -1835,7 +1835,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vsext.vf2 v12, v8
 ; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
 ; RV64-NEXT: ret
 %eidxs = sext <8 x i32> %idxs to <8 x i64>
@@ -1849,7 +1849,7 @@
 ; RV32: # %bb.0:
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vsll.vi v12, v8, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT: ret
 ;
@@ -1858,7 +1858,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vzext.vf2 v12, v8
 ; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
 ; RV64-NEXT: ret
 %eidxs = zext <8 x i32> %idxs to <8 x i64>
@@ -1873,7 +1873,7 @@
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vnsrl.wi v12, v8, 0
 ; RV32-NEXT: vsll.vi v12, v12, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT: ret
 ;
@@ -1881,7 +1881,7 @@
 ; RV64: # %bb.0:
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT: vsll.vi v8, v8, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
 ; RV64-NEXT: ret
 %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %idxs
@@ -1905,14 +1905,14 @@
 ; RV32-NEXT: vslidedown.vi v24, v8, 16
 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
 ; RV32-NEXT: vslidedown.vi v0, v1, 2
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
 ; RV32-NEXT: vluxei32.v v16, (zero), v24, v0.t
 ; RV32-NEXT: li a1, 16
 ; RV32-NEXT: bltu a0, a1, .LBB86_4
 ; RV32-NEXT: # %bb.3:
 ; RV32-NEXT: li a0, 16
 ; RV32-NEXT: .LBB86_4:
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; RV32-NEXT: vmv1r.v v0, v1
 ; RV32-NEXT: vluxei32.v v24, (zero), v8, v0.t
 ; RV32-NEXT: vmv.v.v v8, v24
@@ -1929,14 +1929,14 @@
 ; RV64-NEXT: .LBB86_2:
 ; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
 ; RV64-NEXT: vslidedown.vi v0, v24, 2
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
 ; RV64-NEXT: vluxei64.v v16, (zero), v16, v0.t
 ; RV64-NEXT: li a1, 16
 ; RV64-NEXT: bltu a0, a1, .LBB86_4
 ; RV64-NEXT: # %bb.3:
 ; RV64-NEXT: li a0, 16
 ; RV64-NEXT: .LBB86_4:
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; RV64-NEXT: vmv1r.v v0, v24
 ; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t
 ; RV64-NEXT: ret
@@ -1957,7 +1957,7 @@
 ; RV32-NEXT: # %bb.1:
 ; RV32-NEXT: li a2, 16
 ; RV32-NEXT: .LBB87_2:
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT: addi a3, a1, -16
 ; RV32-NEXT: li a2, 0
@@ -1969,7 +1969,7 @@
 ; RV32-NEXT: vslidedown.vi v0, v0, 2
 ; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
 ; RV32-NEXT: vslidedown.vi v24, v16, 16
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT: ret
 ;
@@ -1991,14 +1991,14 @@
 ; RV64-NEXT: vsll.vi v24, v24, 3
 ; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
 ; RV64-NEXT: vslidedown.vi v0, v10, 2
-; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
 ; RV64-NEXT: li a2, 16
 ; RV64-NEXT: bltu a1, a2, .LBB87_4
 ; RV64-NEXT: # %bb.3:
 ; RV64-NEXT: li a1, 16
 ; RV64-NEXT: .LBB87_4:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
 ; RV64-NEXT: vmv1r.v v0, v10
 ; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t
 ; RV64-NEXT: ret
@@ -2020,7 +2020,7 @@
 ; RV32-NEXT: # %bb.1:
 ; RV32-NEXT: li a2, 16
 ; RV32-NEXT: .LBB88_2:
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT: addi a3, a1, -16
 ; RV32-NEXT: li a2, 0
@@ -2032,7 +2032,7 @@
 ; RV32-NEXT: vslidedown.vi v0, v0, 2
 ; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
 ; RV32-NEXT: vslidedown.vi v24, v16, 16
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT: ret
 ;
@@ -2054,14 +2054,14 @@
 ; RV64-NEXT: vsll.vi v24, v24, 3
 ; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
 ; RV64-NEXT: vslidedown.vi v0, v10, 2
-; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
 ; RV64-NEXT: li a2, 16
 ; RV64-NEXT: bltu a1, a2, .LBB88_4
 ; RV64-NEXT: # %bb.3:
 ; RV64-NEXT: li a1, 16
 ; RV64-NEXT: .LBB88_4:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
 ; RV64-NEXT: vmv1r.v v0, v10
 ; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t
 ; RV64-NEXT: ret
@@ -2084,7 +2084,7 @@
 ; RV32-NEXT: # %bb.1:
 ; RV32-NEXT: li a2, 16
 ; RV32-NEXT: .LBB89_2:
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT: addi a3, a1, -16
 ; RV32-NEXT: li a2, 0
@@ -2096,7 +2096,7 @@
 ; RV32-NEXT: vslidedown.vi v0, v0, 2
 ; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
 ; RV32-NEXT: vslidedown.vi v24, v16, 16
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT: ret
 ;
@@ -2118,14 +2118,14 @@
 ; RV64-NEXT: vsll.vi v24, v24, 3
 ; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
 ; RV64-NEXT: vslidedown.vi v0, v10, 2
-; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
 ; RV64-NEXT: li a2, 16
 ; RV64-NEXT: bltu a1, a2, .LBB89_4
 ; RV64-NEXT: # %bb.3:
 ; RV64-NEXT: li a1, 16
 ; RV64-NEXT: .LBB89_4:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
 ; RV64-NEXT: vmv1r.v v0, v10
 ; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t
 ; RV64-NEXT: ret
@@ -2148,7 +2148,7 @@
 ; RV32-NEXT: # %bb.1:
 ; RV32-NEXT: li a2, 16
 ; RV32-NEXT: .LBB90_2:
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT: addi a3, a1, -16
 ; RV32-NEXT: li a2, 0
@@ -2160,7 +2160,7 @@
 ; RV32-NEXT: vslidedown.vi v0, v0, 2
 ; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
 ; RV32-NEXT: vslidedown.vi v24, v16, 16
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT: ret
 ;
@@ -2182,14 +2182,14 @@
 ; RV64-NEXT: vsll.vi v24, v24, 3
 ; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
 ; RV64-NEXT: vslidedown.vi v0, v12, 2
-; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
 ; RV64-NEXT: li a2, 16
 ; RV64-NEXT: bltu a1, a2, .LBB90_4
 ; RV64-NEXT: # %bb.3:
 ; RV64-NEXT: li a1, 16
 ; RV64-NEXT: .LBB90_4:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
 ; RV64-NEXT: vmv1r.v v0, v12
 ; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t
 ; RV64-NEXT: ret
@@ -2211,7 +2211,7 @@
 ; RV32-NEXT: # %bb.1:
 ; RV32-NEXT: li a2, 16
 ; RV32-NEXT: .LBB91_2:
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT: addi a3, a1, -16
 ; RV32-NEXT: li a2, 0
@@ -2223,7 +2223,7 @@
 ; RV32-NEXT: vslidedown.vi v0, v0, 2
 ; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
 ; RV32-NEXT: vslidedown.vi v24, v16, 16
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT: ret
 ;
@@ -2245,14 +2245,14 @@
 ; RV64-NEXT: vsll.vi v24, v24, 3
 ; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
 ; RV64-NEXT: vslidedown.vi v0, v12, 2
-; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
 ; RV64-NEXT: li a2, 16
 ; RV64-NEXT: bltu a1, a2, .LBB91_4
 ; RV64-NEXT: # %bb.3:
 ; RV64-NEXT: li a1, 16
 ; RV64-NEXT: .LBB91_4:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
 ; RV64-NEXT: vmv1r.v v0, v12
 ; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t
 ; RV64-NEXT: ret
@@ -2275,7 +2275,7 @@
 ; RV32-NEXT: # %bb.1:
 ; RV32-NEXT: li a2, 16
 ; RV32-NEXT: .LBB92_2:
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT: addi a3, a1, -16
 ; RV32-NEXT: li a2, 0
@@ -2287,7 +2287,7 @@
 ; RV32-NEXT: vslidedown.vi v0, v0, 2
 ; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
 ; RV32-NEXT: vslidedown.vi v24, v16, 16
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT: ret
 ;
@@ -2309,14 +2309,14 @@
 ; RV64-NEXT: vsll.vi v24, v24, 3
 ; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
 ; RV64-NEXT: vslidedown.vi v0, v12, 2
-; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
 ; RV64-NEXT: li a2, 16
 ; RV64-NEXT: bltu a1, a2, .LBB92_4
 ; RV64-NEXT: # %bb.3:
 ; RV64-NEXT: li a1, 16
 ; RV64-NEXT: .LBB92_4:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
 ; RV64-NEXT: vmv1r.v v0, v12
 ; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t
 ; RV64-NEXT: ret
@@ -2338,7 +2338,7 @@
 ; RV32-NEXT: # %bb.1:
 ; RV32-NEXT: li a2, 16
 ; RV32-NEXT: .LBB93_2:
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT: addi a3, a1, -16
 ; RV32-NEXT: li a2, 0
@@ -2350,7 +2350,7 @@
 ; RV32-NEXT: vslidedown.vi v24, v16, 16
 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
 ; RV32-NEXT: vslidedown.vi v0, v0, 2
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT: ret
 ;
@@ -2372,14 +2372,14 @@
 ; RV64-NEXT: vsll.vi v8, v0, 3
 ; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
 ; RV64-NEXT: vslidedown.vi v0, v24, 2
-; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
 ; RV64-NEXT: li a2, 16
 ; RV64-NEXT: bltu a1, a2, .LBB93_4
 ; RV64-NEXT: # %bb.3:
 ; RV64-NEXT: li a1, 16
 ; RV64-NEXT: .LBB93_4:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
 ; RV64-NEXT: vmv1r.v v0, v24
 ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
 ; RV64-NEXT: ret
@@ -2400,7 +2400,7 @@
 ; RV32-NEXT: # %bb.1:
 ; RV32-NEXT: li a2, 16
 ; RV32-NEXT: .LBB94_2:
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT: addi a3, a1, -16
 ; RV32-NEXT: li a2, 0
@@ -2412,7 +2412,7 @@
 ; RV32-NEXT: vslidedown.vi v24, v16, 16
 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
 ; RV32-NEXT: vslidedown.vi v0, v0, 2
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT: ret
 ;
@@ -2442,14 +2442,14 @@
 ; RV64-NEXT: addi a3, sp, 16
 ; RV64-NEXT: vl1r.v v24, (a3) # Unknown-size Folded Reload
 ; RV64-NEXT: vslidedown.vi v0, v24, 2
-; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
 ; RV64-NEXT: li a2, 16
 ; RV64-NEXT: bltu a1, a2, .LBB94_4
 ; RV64-NEXT: # %bb.3:
 ; RV64-NEXT: li a1, 16
 ; RV64-NEXT: .LBB94_4:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
 ; RV64-NEXT: vmv1r.v v0, v24
 ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
 ; RV64-NEXT: csrr a0, vlenb
@@ -2475,7 +2475,7 @@
 ; RV32-NEXT: # %bb.1:
 ; RV32-NEXT: li a2, 16
 ; RV32-NEXT: .LBB95_2:
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT: addi a3, a1, -16
 ; RV32-NEXT: li a2, 0
@@ -2487,7 +2487,7 @@
 ; RV32-NEXT: vslidedown.vi v24, v16, 16
 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
 ; RV32-NEXT: vslidedown.vi v0, v0, 2
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT: ret
 ;
@@ -2517,14 +2517,14 @@
 ; RV64-NEXT: addi a3, sp, 16
 ; RV64-NEXT: vl1r.v v24, (a3) # Unknown-size Folded Reload
 ; RV64-NEXT: vslidedown.vi v0, v24, 2
-; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
 ; RV64-NEXT: li a2, 16
 ; RV64-NEXT: bltu a1, a2, .LBB95_4
 ; RV64-NEXT: # %bb.3:
 ; RV64-NEXT: li a1, 16
 ; RV64-NEXT: .LBB95_4:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
 ; RV64-NEXT: vmv1r.v v0, v24
 ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
 ; RV64-NEXT: csrr a0, vlenb
@@ -2560,14 +2560,14 @@
 ; RV32-NEXT: vslidedown.vi v8, v24, 16
 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
 ; RV32-NEXT: vslidedown.vi v0, v1, 2
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
 ; RV32-NEXT: li a2, 16
 ; RV32-NEXT: bltu a1, a2, .LBB96_4
 ; RV32-NEXT: # %bb.3:
 ; RV32-NEXT: li a1, 16
 ; RV32-NEXT: .LBB96_4:
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
 ; RV32-NEXT: vmv1r.v v0, v1
 ; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -2586,14 +2586,14 @@
 ; RV64-NEXT: vsll.vi v8, v8, 3
 ; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
 ; RV64-NEXT: vslidedown.vi v0, v24, 2
-; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
 ; RV64-NEXT: li a2, 16
 ; RV64-NEXT: bltu a1, a2, .LBB96_4
 ; RV64-NEXT: # %bb.3:
 ; RV64-NEXT: li a1, 16
 ; RV64-NEXT: .LBB96_4:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
 ; RV64-NEXT: vmv1r.v v0, v24
 ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
 ; RV64-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll
@@ -9,7 +9,7 @@
 define <2 x i8> @vpload_v2i8(<2 x i8>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v2i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT: vle8.v v8, (a0), v0.t
 ; CHECK-NEXT: ret
 %load = call <2 x i8> @llvm.vp.load.v2i8.p0v2i8(<2 x i8>* %ptr, <2 x i1> %m, i32 %evl)
@@ -21,7 +21,7 @@
 define <3 x i8> @vpload_v3i8(<3 x i8>* %ptr, <3 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v3i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT: vle8.v v8, (a0), v0.t
 ; CHECK-NEXT: ret
 %load = call <3 x i8> @llvm.vp.load.v3i8.p0v3i8(<3 x i8>* %ptr, <3 x i1> %m, i32 %evl)
@@ -33,7 +33,7 @@
 define <4 x i8> @vpload_v4i8(<4 x i8>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v4i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT: vle8.v v8, (a0), v0.t
 ; CHECK-NEXT: ret
 %load = call <4 x i8> @llvm.vp.load.v4i8.p0v4i8(<4 x i8>* %ptr, <4 x i1> %m, i32 %evl)
@@ -57,7 +57,7 @@
 define <8 x i8> @vpload_v8i8(<8 x i8>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v8i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT: vle8.v v8, (a0), v0.t
 ; CHECK-NEXT: ret
 %load = call <8 x i8> @llvm.vp.load.v8i8.p0v8i8(<8 x i8>* %ptr, <8 x i1> %m, i32 %evl)
@@ -69,7 +69,7 @@
 define <2 x i16> @vpload_v2i16(<2 x i16>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT: vle16.v v8, (a0), v0.t
 ; CHECK-NEXT: ret
 %load = call <2 x i16> @llvm.vp.load.v2i16.p0v2i16(<2 x i16>* %ptr, <2 x i1> %m, i32 %evl)
@@ -81,7 +81,7 @@
 define <4 x i16> @vpload_v4i16(<4 x i16>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT: vle16.v v8, (a0), v0.t
 ; CHECK-NEXT: ret
 %load = call <4 x i16> @llvm.vp.load.v4i16.p0v4i16(<4 x i16>* %ptr, <4 x i1> %m, i32 %evl)
@@ -93,7 +93,7 @@
 define <8 x i16> @vpload_v8i16(<8 x i16>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v8i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT: vle16.v v8, (a0), v0.t
 ; CHECK-NEXT: ret
 %load = call <8 x i16> @llvm.vp.load.v8i16.p0v8i16(<8 x i16>* %ptr, <8 x i1> %m, i32 %evl)
@@ -117,7 +117,7 @@
 define <2 x i32> @vpload_v2i32(<2 x i32>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT: vle32.v v8, (a0), v0.t
 ; CHECK-NEXT: ret
 %load = call <2 x i32> @llvm.vp.load.v2i32.p0v2i32(<2 x i32>* %ptr, <2 x i1> %m, i32 %evl)
@@ -129,7 +129,7 @@
 define <4 x i32> @vpload_v4i32(<4 x i32>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT: vle32.v v8, (a0), v0.t
 ; CHECK-NEXT: ret
 %load = call <4 x i32> @llvm.vp.load.v4i32.p0v4i32(<4 x i32>* %ptr, <4 x i1> %m, i32 %evl)
@@ -141,7 +141,7 @@
 define <6 x i32> @vpload_v6i32(<6 x i32>* %ptr, <6 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v6i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT: vle32.v v8, (a0), v0.t
 ; CHECK-NEXT: ret
 %load = call <6 x i32> @llvm.vp.load.v6i32.p0v6i32(<6 x i32>* %ptr, <6 x i1> %m, i32 %evl)
@@ -165,7 +165,7 @@
 define <8 x i32> @vpload_v8i32(<8 x i32>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v8i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT: vle32.v v8, (a0), v0.t
 ; CHECK-NEXT: ret
 %load = call <8 x i32> @llvm.vp.load.v8i32.p0v8i32(<8 x i32>* %ptr, <8 x i1> %m, i32 %evl)
@@ -189,7 +189,7 @@
 define <2 x i64> @vpload_v2i64(<2 x i64>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT: vle64.v v8, (a0), v0.t
 ; CHECK-NEXT: ret
 %load = call <2 x i64> @llvm.vp.load.v2i64.p0v2i64(<2 x i64>* %ptr, <2 x i1> %m, i32 %evl)
@@ -201,7 +201,7 @@
 define <4 x i64> @vpload_v4i64(<4 x i64>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v4i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT: vle64.v v8, (a0), v0.t
 ; CHECK-NEXT: ret
 %load = call <4 x i64> @llvm.vp.load.v4i64.p0v4i64(<4 x i64>* %ptr, <4 x i1> %m, i32 %evl)
@@ -225,7 +225,7 @@
 define <8 x i64> @vpload_v8i64(<8 x i64>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v8i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; CHECK-NEXT: vle64.v v8, (a0), v0.t
 ; CHECK-NEXT: ret
 %load = call <8 x i64> @llvm.vp.load.v8i64.p0v8i64(<8 x i64>* %ptr, <8 x i1> %m, i32 %evl)
@@ -237,7 +237,7 @@
 define <2 x half> @vpload_v2f16(<2 x half>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT: vle16.v v8, (a0), v0.t
 ; CHECK-NEXT: ret
 %load = call <2 x half> @llvm.vp.load.v2f16.p0v2f16(<2 x half>* %ptr, <2 x i1> %m, i32 %evl)
@@ -261,7 +261,7 @@
 define <4 x half> @vpload_v4f16(<4 x half>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v4f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT: vle16.v v8, (a0), v0.t
 ; CHECK-NEXT: ret
 %load = call <4 x half> @llvm.vp.load.v4f16.p0v4f16(<4 x half>* %ptr, <4 x i1> %m, i32 %evl)
@@ -273,7 +273,7 @@
 define <8 x half> @vpload_v8f16(<8 x half>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT: vle16.v v8, (a0), v0.t
 ; CHECK-NEXT: ret
 %load = call <8 x half> @llvm.vp.load.v8f16.p0v8f16(<8 x half>* %ptr, <8 x i1> %m, i32 %evl)
@@ -285,7 +285,7 @@
 define <2 x float> @vpload_v2f32(<2 x float>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call <2 x float> @llvm.vp.load.v2f32.p0v2f32(<2 x float>* %ptr, <2 x i1> %m, i32 %evl) @@ -297,7 +297,7 @@ define <4 x float> @vpload_v4f32(<4 x float>* %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call <4 x float> @llvm.vp.load.v4f32.p0v4f32(<4 x float>* %ptr, <4 x i1> %m, i32 %evl) @@ -309,7 +309,7 @@ define <8 x float> @vpload_v8f32(<8 x float>* %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call <8 x float> @llvm.vp.load.v8f32.p0v8f32(<8 x float>* %ptr, <8 x i1> %m, i32 %evl) @@ -333,7 +333,7 @@ define <2 x double> @vpload_v2f64(<2 x double>* %ptr, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call <2 x double> @llvm.vp.load.v2f64.p0v2f64(<2 x double>* %ptr, <2 x i1> %m, i32 %evl) @@ -345,7 +345,7 @@ define <4 x double> @vpload_v4f64(<4 x double>* %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vle64.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call <4 x double> @llvm.vp.load.v4f64.p0v4f64(<4 x double>* %ptr, <4 x i1> %m, i32 %evl) @@ -369,7 +369,7 @@ define <8 x double> @vpload_v8f64(<8 x double>* %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vle64.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call <8 x double> @llvm.vp.load.v8f64.p0v8f64(<8 x double>* %ptr, <8 x i1> %m, i32 %evl) @@ -391,14 +391,14 @@ ; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v0, v8, 2 ; CHECK-NEXT: addi a3, a0, 128 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v16, (a3), v0.t ; CHECK-NEXT: li a2, 16 ; CHECK-NEXT: bltu a1, a2, .LBB31_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: .LBB31_4: -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vle64.v v8, (a0), v0.t ; CHECK-NEXT: ret @@ -429,7 +429,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v0, v8, 2 ; CHECK-NEXT: addi a5, a1, 128 -; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v16, (a5), v0.t ; CHECK-NEXT: addi a5, a2, -32 ; CHECK-NEXT: li a4, 0 @@ -445,13 +445,13 @@ ; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v0, v8, 4 ; CHECK-NEXT: addi a5, a1, 256 -; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v24, (a5), v0.t ; CHECK-NEXT: bltu a3, a2, .LBB32_10 ; CHECK-NEXT: # %bb.9: ; CHECK-NEXT: li a3, 16 ; CHECK-NEXT: 
.LBB32_10: -; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vle64.v v8, (a1), v0.t ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll @@ -14,7 +14,7 @@ ; CHECK-NEXT: vsra.vi v9, v9, 1 ; CHECK-NEXT: vadd.vv v8, v8, v8 ; CHECK-NEXT: vsra.vi v8, v8, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <8 x i7> @llvm.vp.srem.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl) @@ -26,7 +26,7 @@ define <2 x i8> @vrem_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.vp.srem.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl) @@ -48,7 +48,7 @@ define <2 x i8> @vrem_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0 @@ -76,7 +76,7 @@ define <4 x i8> @vrem_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.srem.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl) @@ -98,7 +98,7 @@ define <4 x i8> @vrem_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0 @@ -126,7 +126,7 @@ define <6 x i8> @vrem_vv_v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v6i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <6 x i8> @llvm.vp.srem.v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 %evl) @@ -138,7 +138,7 @@ define <8 x i8> @vrem_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <8 x i8> @llvm.vp.srem.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl) @@ -160,7 +160,7 @@ define <8 x i8> @vrem_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 @@ -188,7 +188,7 @@ define <16 x i8> @vrem_vv_v16i8(<16 
x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <16 x i8> @llvm.vp.srem.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl) @@ -210,7 +210,7 @@ define <16 x i8> @vrem_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0 @@ -238,7 +238,7 @@ define <2 x i16> @vrem_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.vp.srem.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl) @@ -260,7 +260,7 @@ define <2 x i16> @vrem_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0 @@ -288,7 +288,7 @@ define <4 x i16> @vrem_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.vp.srem.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl) @@ -310,7 +310,7 @@ define <4 x i16> @vrem_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0 @@ -338,7 +338,7 @@ define <8 x i16> @vrem_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <8 x i16> @llvm.vp.srem.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl) @@ -360,7 +360,7 @@ define <8 x i16> @vrem_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0 @@ -388,7 +388,7 @@ define <16 x i16> @vrem_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call <16 x i16> @llvm.vp.srem.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl) @@ -410,7 +410,7 @@ define <16 x i16> @vrem_vx_v16i16(<16 x i16> %va, 
i16 %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0 @@ -438,7 +438,7 @@ define <2 x i32> @vrem_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x i32> @llvm.vp.srem.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl) @@ -460,7 +460,7 @@ define <2 x i32> @vrem_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0 @@ -488,7 +488,7 @@ define <4 x i32> @vrem_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.vp.srem.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl) @@ -510,7 +510,7 @@ define <4 x i32> @vrem_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0 @@ -538,7 +538,7 @@ define <8 x i32> @vrem_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call <8 x i32> @llvm.vp.srem.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl) @@ -560,7 +560,7 @@ define <8 x i32> @vrem_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 @@ -588,7 +588,7 @@ define <16 x i32> @vrem_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call <16 x i32> @llvm.vp.srem.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl) @@ -610,7 +610,7 @@ define <16 x i32> @vrem_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0 @@ -638,7 +638,7 @@ define <2 x i64> @vrem_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { 
; CHECK-LABEL: vrem_vv_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x i64> @llvm.vp.srem.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl) @@ -667,14 +667,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vrem.vv v8, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vrem.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0 @@ -716,7 +716,7 @@ define <4 x i64> @vrem_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.vp.srem.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl) @@ -745,14 +745,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vrem.vv v8, v8, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vrem.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0 @@ -794,7 +794,7 @@ define <8 x i64> @vrem_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call <8 x i64> @llvm.vp.srem.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl) @@ -823,14 +823,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vrem.vv v8, v8, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vrem.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 @@ -872,7 +872,7 @@ define <16 x i64> @vrem_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v16i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call <16 x i64> @llvm.vp.srem.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl) @@ -901,14 +901,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vrem.vv v8, v8, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 ; 
RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_v16i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vrem.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll @@ -13,7 +13,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vand.vx v9, v9, a1 ; CHECK-NEXT: vand.vx v8, v8, a1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <8 x i7> @llvm.vp.urem.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl) @@ -25,7 +25,7 @@ define <2 x i8> @vremu_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.vp.urem.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl) @@ -47,7 +47,7 @@ define <2 x i8> @vremu_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0 @@ -75,7 +75,7 @@ define <4 x i8> @vremu_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.urem.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl) @@ -97,7 +97,7 @@ define <4 x i8> @vremu_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0 @@ -125,7 +125,7 @@ define <6 x i8> @vremu_vv_v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v6i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <6 x i8> @llvm.vp.urem.v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 %evl) @@ -137,7 +137,7 @@ define <8 x i8> @vremu_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <8 x i8> @llvm.vp.urem.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl) @@ -159,7 +159,7 @@ define <8 x i8> @vremu_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = 
insertelement <8 x i8> poison, i8 %b, i32 0 @@ -187,7 +187,7 @@ define <16 x i8> @vremu_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <16 x i8> @llvm.vp.urem.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl) @@ -209,7 +209,7 @@ define <16 x i8> @vremu_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0 @@ -237,7 +237,7 @@ define <2 x i16> @vremu_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.vp.urem.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl) @@ -259,7 +259,7 @@ define <2 x i16> @vremu_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0 @@ -287,7 +287,7 @@ define <4 x i16> @vremu_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.vp.urem.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl) @@ -309,7 +309,7 @@ define <4 x i16> @vremu_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0 @@ -337,7 +337,7 @@ define <8 x i16> @vremu_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <8 x i16> @llvm.vp.urem.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl) @@ -359,7 +359,7 @@ define <8 x i16> @vremu_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0 @@ -387,7 +387,7 @@ define <16 x i16> @vremu_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call <16 x i16> 
@llvm.vp.urem.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl) @@ -409,7 +409,7 @@ define <16 x i16> @vremu_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0 @@ -437,7 +437,7 @@ define <2 x i32> @vremu_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x i32> @llvm.vp.urem.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl) @@ -459,7 +459,7 @@ define <2 x i32> @vremu_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0 @@ -487,7 +487,7 @@ define <4 x i32> @vremu_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.vp.urem.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl) @@ -509,7 +509,7 @@ define <4 x i32> @vremu_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0 @@ -537,7 +537,7 @@ define <8 x i32> @vremu_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call <8 x i32> @llvm.vp.urem.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl) @@ -559,7 +559,7 @@ define <8 x i32> @vremu_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 @@ -587,7 +587,7 @@ define <16 x i32> @vremu_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call <16 x i32> @llvm.vp.urem.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl) @@ -609,7 +609,7 @@ define <16 x i32> @vremu_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret 
%elt.head = insertelement <16 x i32> poison, i32 %b, i32 0 @@ -637,7 +637,7 @@ define <2 x i64> @vremu_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x i64> @llvm.vp.urem.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl) @@ -666,14 +666,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vremu.vv v8, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vx_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vremu.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0 @@ -715,7 +715,7 @@ define <4 x i64> @vremu_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.vp.urem.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl) @@ -744,14 +744,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vremu.vv v8, v8, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vx_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vremu.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0 @@ -793,7 +793,7 @@ define <8 x i64> @vremu_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call <8 x i64> @llvm.vp.urem.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl) @@ -822,14 +822,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vremu.vv v8, v8, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vx_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vremu.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 @@ -871,7 +871,7 @@ define <16 x i64> @vremu_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v16i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call <16 x i64> @llvm.vp.urem.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl) @@ -900,14 +900,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; 
RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vremu.vv v8, v8, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vx_v16i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vremu.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrsub-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrsub-vp.ll @@ -9,7 +9,7 @@ define <2 x i8> @vrsub_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0 @@ -35,7 +35,7 @@ define <2 x i8> @vrsub_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 2, i32 0 @@ -63,7 +63,7 @@ define <4 x i8> @vrsub_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0 @@ -89,7 +89,7 @@ define <4 x i8> @vrsub_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 2, i32 0 @@ -117,7 +117,7 @@ define <8 x i8> @vrsub_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 @@ -143,7 +143,7 @@ define <8 x i8> @vrsub_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 2, i32 0 @@ -171,7 +171,7 @@ define <16 x i8> @vrsub_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0 @@ -197,7 +197,7 @@ define <16 x i8> @vrsub_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t ; CHECK-NEXT: ret 
%elt.head = insertelement <16 x i8> poison, i8 2, i32 0 @@ -225,7 +225,7 @@ define <2 x i16> @vrsub_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0 @@ -251,7 +251,7 @@ define <2 x i16> @vrsub_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 2, i32 0 @@ -279,7 +279,7 @@ define <4 x i16> @vrsub_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0 @@ -305,7 +305,7 @@ define <4 x i16> @vrsub_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 2, i32 0 @@ -333,7 +333,7 @@ define <8 x i16> @vrsub_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0 @@ -359,7 +359,7 @@ define <8 x i16> @vrsub_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 2, i32 0 @@ -387,7 +387,7 @@ define <16 x i16> @vrsub_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0 @@ -413,7 +413,7 @@ define <16 x i16> @vrsub_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 2, i32 0 @@ -441,7 +441,7 @@ define <2 x i32> @vrsub_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0 @@ -467,7 +467,7 @@ define <2 x i32> @vrsub_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: 
vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 2, i32 0 @@ -495,7 +495,7 @@ define <4 x i32> @vrsub_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0 @@ -521,7 +521,7 @@ define <4 x i32> @vrsub_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 2, i32 0 @@ -549,7 +549,7 @@ define <8 x i32> @vrsub_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 @@ -575,7 +575,7 @@ define <8 x i32> @vrsub_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 2, i32 0 @@ -603,7 +603,7 @@ define <16 x i32> @vrsub_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0 @@ -629,7 +629,7 @@ define <16 x i32> @vrsub_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 2, i32 0 @@ -664,14 +664,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vsub.vv v8, v9, v8, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vrsub_vx_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vrsub.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0 @@ -711,7 +711,7 @@ define <2 x i64> @vrsub_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 2, i32 0 @@ -746,14 +746,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli 
zero, a2, e64, m2, ta, ma ; RV32-NEXT: vsub.vv v8, v10, v8, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vrsub_vx_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vrsub.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0 @@ -793,7 +793,7 @@ define <4 x i64> @vrsub_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 2, i32 0 @@ -828,14 +828,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vsub.vv v8, v12, v8, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vrsub_vx_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vrsub.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 @@ -875,7 +875,7 @@ define <8 x i64> @vrsub_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_v8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 2, i32 0 @@ -910,14 +910,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vsub.vv v8, v16, v8, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vrsub_vx_v16i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vrsub.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0 @@ -957,7 +957,7 @@ define <16 x i64> @vrsub_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_v16i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 2, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll @@ -12,7 +12,7 @@ ; CHECK-NEXT: li a1, 127 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vand.vx v9, v9, a1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <8 x i7> @llvm.vp.shl.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl) @@ -24,7 +24,7 @@ define <2 x i8> @vsll_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x i8> 
@llvm.vp.shl.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl) @@ -46,7 +46,7 @@ define <2 x i8> @vsll_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0 @@ -72,7 +72,7 @@ define <2 x i8> @vsll_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 3, i32 0 @@ -100,7 +100,7 @@ define <3 x i8> @vsll_vv_v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v3i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <3 x i8> @llvm.vp.shl.v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> %m, i32 %evl) @@ -112,7 +112,7 @@ define <4 x i8> @vsll_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.shl.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl) @@ -134,7 +134,7 @@ define <4 x i8> @vsll_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0 @@ -160,7 +160,7 @@ define <4 x i8> @vsll_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 3, i32 0 @@ -188,7 +188,7 @@ define <8 x i8> @vsll_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <8 x i8> @llvm.vp.shl.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl) @@ -210,7 +210,7 @@ define <8 x i8> @vsll_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 @@ -236,7 +236,7 @@ define <8 x i8> @vsll_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 3, i32 0 @@ -264,7 +264,7 @@ define <16 x i8> @vsll_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v16i8: ; 
CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <16 x i8> @llvm.vp.shl.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
@@ -286,7 +286,7 @@
 define <16 x i8> @vsll_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vx_v16i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
@@ -312,7 +312,7 @@
 define <16 x i8> @vsll_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vi_v16i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <16 x i8> poison, i8 3, i32 0
@@ -340,7 +340,7 @@
 define <2 x i16> @vsll_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vv_v2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <2 x i16> @llvm.vp.shl.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
@@ -362,7 +362,7 @@
 define <2 x i16> @vsll_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vx_v2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
@@ -388,7 +388,7 @@
 define <2 x i16> @vsll_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vi_v2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <2 x i16> poison, i16 3, i32 0
@@ -416,7 +416,7 @@
 define <4 x i16> @vsll_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vv_v4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <4 x i16> @llvm.vp.shl.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
@@ -438,7 +438,7 @@
 define <4 x i16> @vsll_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vx_v4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
@@ -464,7 +464,7 @@
 define <4 x i16> @vsll_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vi_v4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <4 x i16> poison, i16 3, i32 0
@@ -492,7 +492,7 @@
 define <8 x i16> @vsll_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vv_v8i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <8 x i16> @llvm.vp.shl.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
@@ -514,7 +514,7 @@
 define <8 x i16> @vsll_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vx_v8i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
@@ -540,7 +540,7 @@
 define <8 x i16> @vsll_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vi_v8i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <8 x i16> poison, i16 3, i32 0
@@ -568,7 +568,7 @@
 define <16 x i16> @vsll_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vv_v16i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call <16 x i16> @llvm.vp.shl.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
@@ -590,7 +590,7 @@
 define <16 x i16> @vsll_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vx_v16i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
@@ -616,7 +616,7 @@
 define <16 x i16> @vsll_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vi_v16i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <16 x i16> poison, i16 3, i32 0
@@ -644,7 +644,7 @@
 define <2 x i32> @vsll_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vv_v2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <2 x i32> @llvm.vp.shl.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
@@ -666,7 +666,7 @@
 define <2 x i32> @vsll_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vx_v2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
@@ -692,7 +692,7 @@
 define <2 x i32> @vsll_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vi_v2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <2 x i32> poison, i32 3, i32 0
@@ -720,7 +720,7 @@
 define <4 x i32> @vsll_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vv_v4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <4 x i32> @llvm.vp.shl.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
@@ -742,7 +742,7 @@
 define <4 x i32> @vsll_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vx_v4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
@@ -768,7 +768,7 @@
 define <4 x i32> @vsll_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vi_v4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <4 x i32> poison, i32 3, i32 0
@@ -796,7 +796,7 @@
 define <8 x i32> @vsll_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vv_v8i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call <8 x i32> @llvm.vp.shl.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
@@ -818,7 +818,7 @@
 define <8 x i32> @vsll_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vx_v8i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
@@ -844,7 +844,7 @@
 define <8 x i32> @vsll_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vi_v8i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <8 x i32> poison, i32 3, i32 0
@@ -872,7 +872,7 @@
 define <16 x i32> @vsll_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vv_v16i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vsll.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call <16 x i32> @llvm.vp.shl.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
@@ -894,7 +894,7 @@
 define <16 x i32> @vsll_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vx_v16i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
@@ -920,7 +920,7 @@
 define <16 x i32> @vsll_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vi_v16i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <16 x i32> poison, i32 3, i32 0
@@ -948,7 +948,7 @@
 define <2 x i64> @vsll_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vv_v2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <2 x i64> @llvm.vp.shl.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
@@ -970,13 +970,13 @@
 define <2 x i64> @vsll_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vsll_vx_v2i64:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT: vsll.vx v8, v8, a0, v0.t
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vsll_vx_v2i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT: vsll.vx v8, v8, a0, v0.t
 ; RV64-NEXT: ret
 %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
@@ -1008,7 +1008,7 @@
 define <2 x i64> @vsll_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vi_v2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <2 x i64> poison, i64 3, i32 0
@@ -1036,7 +1036,7 @@
 define <4 x i64> @vsll_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vv_v4i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call <4 x i64> @llvm.vp.shl.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
@@ -1058,13 +1058,13 @@
 define <4 x i64> @vsll_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vsll_vx_v4i64:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT: vsll.vx v8, v8, a0, v0.t
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vsll_vx_v4i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT: vsll.vx v8, v8, a0, v0.t
 ; RV64-NEXT: ret
 %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
@@ -1096,7 +1096,7 @@
 define <4 x i64> @vsll_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vi_v4i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <4 x i64> poison, i64 3, i32 0
@@ -1124,7 +1124,7 @@
 define <8 x i64> @vsll_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vv_v8i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vsll.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call <8 x i64> @llvm.vp.shl.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
@@ -1146,13 +1146,13 @@
 define <8 x i64> @vsll_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vsll_vx_v8i64:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT: vsll.vx v8, v8, a0, v0.t
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vsll_vx_v8i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT: vsll.vx v8, v8, a0, v0.t
 ; RV64-NEXT: ret
 %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
@@ -1184,7 +1184,7 @@
 define <8 x i64> @vsll_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vi_v8i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <8 x i64> poison, i64 3, i32 0
@@ -1212,7 +1212,7 @@
 define <16 x i64> @vsll_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vv_v16i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vsll.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call <16 x i64> @llvm.vp.shl.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
@@ -1234,13 +1234,13 @@
 define <16 x i64> @vsll_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vsll_vx_v16i64:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT: vsll.vx v8, v8, a0, v0.t
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vsll_vx_v16i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
 ; RV64-NEXT: vsll.vx v8, v8, a0, v0.t
 ; RV64-NEXT: ret
 %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
@@ -1272,7 +1272,7 @@
 define <16 x i64> @vsll_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vi_v16i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <16 x i64> poison, i64 3, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll
@@ -14,7 +14,7 @@
 ; CHECK-NEXT: vand.vx v9, v9, a1
 ; CHECK-NEXT: vadd.vv v8, v8, v8
 ; CHECK-NEXT: vsra.vi v8, v8, 1
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <8 x i7> @llvm.vp.ashr.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
@@ -26,7 +26,7 @@
 define <2 x i8> @vsra_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vv_v2i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <2 x i8> @llvm.vp.ashr.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
@@ -48,7 +48,7 @@
 define <2 x i8> @vsra_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vx_v2i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
@@ -74,7 +74,7 @@
 define <2 x i8> @vsra_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vi_v2i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <2 x i8> poison, i8 5, i32 0
@@ -102,7 +102,7 @@
 define <4 x i8> @vsra_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vv_v4i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <4 x i8> @llvm.vp.ashr.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
@@ -124,7 +124,7 @@
 define <4 x i8> @vsra_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vx_v4i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
@@ -150,7 +150,7 @@
 define <4 x i8> @vsra_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vi_v4i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <4 x i8> poison, i8 5, i32 0
@@ -178,7 +178,7 @@
 define <7 x i8> @vsra_vv_v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vv_v7i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <7 x i8> @llvm.vp.ashr.v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 %evl)
@@ -190,7 +190,7 @@
 define <8 x i8> @vsra_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vv_v8i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <8 x i8> @llvm.vp.ashr.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
@@ -212,7 +212,7 @@
 define <8 x i8> @vsra_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vx_v8i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
@@ -238,7 +238,7 @@
 define <8 x i8> @vsra_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vi_v8i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <8 x i8> poison, i8 5, i32 0
@@ -266,7 +266,7 @@
 define <16 x i8> @vsra_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vv_v16i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <16 x i8> @llvm.vp.ashr.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
@@ -288,7 +288,7 @@
 define <16 x i8> @vsra_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vx_v16i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
@@ -314,7 +314,7 @@
 define <16 x i8> @vsra_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vi_v16i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <16 x i8> poison, i8 5, i32 0
@@ -342,7 +342,7 @@
 define <2 x i16> @vsra_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vv_v2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <2 x i16> @llvm.vp.ashr.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
@@ -364,7 +364,7 @@
 define <2 x i16> @vsra_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vx_v2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
@@ -390,7 +390,7 @@
 define <2 x i16> @vsra_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vi_v2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <2 x i16> poison, i16 5, i32 0
@@ -418,7 +418,7 @@
 define <4 x i16> @vsra_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vv_v4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <4 x i16> @llvm.vp.ashr.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
@@ -440,7 +440,7 @@
 define <4 x i16> @vsra_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vx_v4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
@@ -466,7 +466,7 @@
 define <4 x i16> @vsra_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vi_v4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <4 x i16> poison, i16 5, i32 0
@@ -494,7 +494,7 @@
 define <8 x i16> @vsra_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vv_v8i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <8 x i16> @llvm.vp.ashr.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
@@ -516,7 +516,7 @@
 define <8 x i16> @vsra_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vx_v8i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
@@ -542,7 +542,7 @@
 define <8 x i16> @vsra_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vi_v8i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <8 x i16> poison, i16 5, i32 0
@@ -570,7 +570,7 @@
 define <16 x i16> @vsra_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vv_v16i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vsra.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call <16 x i16> @llvm.vp.ashr.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
@@ -592,7 +592,7 @@
 define <16 x i16> @vsra_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vx_v16i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
@@ -618,7 +618,7 @@
 define <16 x i16> @vsra_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vi_v16i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <16 x i16> poison, i16 5, i32 0
@@ -646,7 +646,7 @@
 define <2 x i32> @vsra_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vv_v2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <2 x i32> @llvm.vp.ashr.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
@@ -668,7 +668,7 @@
 define <2 x i32> @vsra_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vx_v2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
@@ -694,7 +694,7 @@
 define <2 x i32> @vsra_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vi_v2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <2 x i32> poison, i32 5, i32 0
@@ -722,7 +722,7 @@
 define <4 x i32> @vsra_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vv_v4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
@@ -744,7 +744,7 @@
 define <4 x i32> @vsra_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vx_v4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
@@ -770,7 +770,7 @@
 define <4 x i32> @vsra_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vi_v4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <4 x i32> poison, i32 5, i32 0
@@ -798,7 +798,7 @@
 define <8 x i32> @vsra_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vv_v8i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vsra.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call <8 x i32> @llvm.vp.ashr.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
@@ -820,7 +820,7 @@
 define <8 x i32> @vsra_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vx_v8i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
@@ -846,7 +846,7 @@
 define <8 x i32> @vsra_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vi_v8i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <8 x i32> poison, i32 5, i32 0
@@ -874,7 +874,7 @@
 define <16 x i32> @vsra_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vv_v16i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vsra.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call <16 x i32> @llvm.vp.ashr.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
@@ -896,7 +896,7 @@
 define <16 x i32> @vsra_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vx_v16i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
@@ -922,7 +922,7 @@
 define <16 x i32> @vsra_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vi_v16i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <16 x i32> poison, i32 5, i32 0
@@ -950,7 +950,7 @@
 define <2 x i64> @vsra_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vv_v2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <2 x i64> @llvm.vp.ashr.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
@@ -972,13 +972,13 @@
 define <2 x i64> @vsra_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vsra_vx_v2i64:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT: vsra.vx v8, v8, a0, v0.t
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vsra_vx_v2i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT: vsra.vx v8, v8, a0, v0.t
 ; RV64-NEXT: ret
 %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
@@ -1010,7 +1010,7 @@
 define <2 x i64> @vsra_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vi_v2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <2 x i64> poison, i64 5, i32 0
@@ -1038,7 +1038,7 @@
 define <4 x i64> @vsra_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vv_v4i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vsra.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call <4 x i64> @llvm.vp.ashr.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
@@ -1060,13 +1060,13 @@
 define <4 x i64> @vsra_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vsra_vx_v4i64:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT: vsra.vx v8, v8, a0, v0.t
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vsra_vx_v4i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT: vsra.vx v8, v8, a0, v0.t
 ; RV64-NEXT: ret
 %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
@@ -1098,7 +1098,7 @@
 define <4 x i64> @vsra_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vi_v4i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <4 x i64> poison, i64 5, i32 0
@@ -1126,7 +1126,7 @@
 define <8 x i64> @vsra_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vv_v8i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vsra.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call <8 x i64> @llvm.vp.ashr.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
@@ -1148,13 +1148,13 @@
 define <8 x i64> @vsra_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vsra_vx_v8i64:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT: vsra.vx v8, v8, a0, v0.t
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vsra_vx_v8i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT: vsra.vx v8, v8, a0, v0.t
 ; RV64-NEXT: ret
 %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
@@ -1186,7 +1186,7 @@
 define <8 x i64> @vsra_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vi_v8i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <8 x i64> poison, i64 5, i32 0
@@ -1214,7 +1214,7 @@
 define <16 x i64> @vsra_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vv_v16i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vsra.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call <16 x i64> @llvm.vp.ashr.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
@@ -1236,13 +1236,13 @@
 define <16 x i64> @vsra_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vsra_vx_v16i64:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT: vsra.vx v8, v8, a0, v0.t
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vsra_vx_v16i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
 ; RV64-NEXT: vsra.vx v8, v8, a0, v0.t
 ; RV64-NEXT: ret
 %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
@@ -1274,7 +1274,7 @@
 define <16 x i64> @vsra_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vi_v16i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <16 x i64> poison, i64 5, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll
@@ -13,7 +13,7 @@
 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT: vand.vx v9, v9, a1
 ; CHECK-NEXT: vand.vx v8, v8, a1
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <8 x i7> @llvm.vp.lshr.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
@@ -25,7 +25,7 @@
 define <2 x i8> @vsrl_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vv_v2i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <2 x i8> @llvm.vp.lshr.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
@@ -47,7 +47,7 @@
 define <2 x i8> @vsrl_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vx_v2i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
@@ -73,7 +73,7 @@
 define <2 x i8> @vsrl_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vi_v2i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <2 x i8> poison, i8 4, i32 0
@@ -101,7 +101,7 @@
 define <4 x i8> @vsrl_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vv_v4i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <4 x i8> @llvm.vp.lshr.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
@@ -123,7 +123,7 @@
 define <4 x i8> @vsrl_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vx_v4i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
@@ -149,7 +149,7 @@
 define <4 x i8> @vsrl_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vi_v4i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <4 x i8> poison, i8 4, i32 0
@@ -177,7 +177,7 @@
 define <7 x i8> @vsrl_vv_v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vv_v7i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <7 x i8> @llvm.vp.lshr.v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 %evl)
@@ -189,7 +189,7 @@
 define <8 x i8> @vsrl_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vv_v8i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <8 x i8> @llvm.vp.lshr.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
@@ -211,7 +211,7 @@
 define <8 x i8> @vsrl_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vx_v8i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
@@ -237,7 +237,7 @@
 define <8 x i8> @vsrl_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vi_v8i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <8 x i8> poison, i8 4, i32 0
@@ -265,7 +265,7 @@
 define <16 x i8> @vsrl_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vv_v16i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <16 x i8> @llvm.vp.lshr.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
@@ -287,7 +287,7 @@
 define <16 x i8> @vsrl_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vx_v16i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
@@ -313,7 +313,7 @@
 define <16 x i8> @vsrl_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vi_v16i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <16 x i8> poison, i8 4, i32 0
@@ -341,7 +341,7 @@
 define <2 x i16> @vsrl_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vv_v2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <2 x i16> @llvm.vp.lshr.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
@@ -363,7 +363,7 @@
 define <2 x i16> @vsrl_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vx_v2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
@@ -389,7 +389,7 @@
 define <2 x i16> @vsrl_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vi_v2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <2 x i16> poison, i16 4, i32 0
@@ -417,7 +417,7 @@
 define <4 x i16> @vsrl_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vv_v4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <4 x i16> @llvm.vp.lshr.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
@@ -439,7 +439,7 @@
 define <4 x i16> @vsrl_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vx_v4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
@@ -465,7 +465,7 @@
 define <4 x i16> @vsrl_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vi_v4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <4 x i16> poison, i16 4, i32 0
@@ -493,7 +493,7 @@
 define <8 x i16> @vsrl_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vv_v8i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <8 x i16> @llvm.vp.lshr.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
@@ -515,7 +515,7 @@
 define <8 x i16> @vsrl_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vx_v8i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
@@ -541,7 +541,7 @@
 define <8 x i16> @vsrl_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vi_v8i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <8 x i16> poison, i16 4, i32 0
@@ -569,7 +569,7 @@
 define <16 x i16> @vsrl_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vv_v16i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vsrl.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call <16 x i16> @llvm.vp.lshr.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
@@ -591,7 +591,7 @@
 define <16 x i16> @vsrl_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vx_v16i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
@@ -617,7 +617,7 @@
 define <16 x i16> @vsrl_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vi_v16i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <16 x i16> poison, i16 4, i32 0
@@ -645,7 +645,7 @@
 define <2 x i32> @vsrl_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vv_v2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <2 x i32> @llvm.vp.lshr.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
@@ -667,7 +667,7 @@
 define <2 x i32> @vsrl_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vx_v2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
@@ -693,7 +693,7 @@
 define <2 x i32> @vsrl_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vi_v2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <2 x i32> poison, i32 4, i32 0
@@ -721,7 +721,7 @@
 define <4 x i32> @vsrl_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vv_v4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <4 x i32> @llvm.vp.lshr.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
@@ -743,7 +743,7 @@
 define <4 x i32> @vsrl_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vx_v4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
@@ -769,7 +769,7 @@
 define <4 x i32> @vsrl_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vi_v4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <4 x i32> poison, i32 4, i32 0
@@ -797,7 +797,7 @@
 define <8 x i32> @vsrl_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vv_v8i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vsrl.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call <8 x i32> @llvm.vp.lshr.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
@@ -819,7 +819,7 @@
 define <8 x i32> @vsrl_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vx_v8i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
@@ -845,7 +845,7 @@
 define <8 x i32> @vsrl_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vi_v8i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <8 x i32> poison, i32 4, i32 0
@@ -873,7 +873,7 @@
 define <16 x i32> @vsrl_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vv_v16i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vsrl.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call <16 x i32> @llvm.vp.lshr.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
@@ -895,7 +895,7 @@
 define <16 x i32> @vsrl_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vx_v16i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
@@ -921,7 +921,7 @@
 define <16 x i32> @vsrl_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vi_v16i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <16 x i32> poison, i32 4, i32 0
@@ -949,7 +949,7 @@
 define <2 x i64> @vsrl_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vv_v2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <2 x i64> @llvm.vp.lshr.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
@@ -971,13 +971,13 @@
 define <2 x i64> @vsrl_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vsrl_vx_v2i64:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vsrl_vx_v2i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t
 ; RV64-NEXT: ret
 %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
@@ -1009,7 +1009,7 @@
 define <2 x i64> @vsrl_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vi_v2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <2 x i64> poison, i64 4, i32 0
@@ -1037,7 +1037,7 @@
 define <4 x i64> @vsrl_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vv_v4i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vsrl.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call <4 x i64> @llvm.vp.lshr.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
@@ -1059,13 +1059,13 @@
 define <4 x i64> @vsrl_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vsrl_vx_v4i64:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vsrl_vx_v4i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t
 ; RV64-NEXT: ret
 %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
@@ -1097,7 +1097,7 @@
 define <4 x i64> @vsrl_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vi_v4i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <4 x i64> poison, i64 4, i32 0
@@ -1125,7 +1125,7 @@
 define <8 x i64> @vsrl_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vv_v8i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vsrl.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call <8 x i64> @llvm.vp.lshr.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
@@ -1147,13 +1147,13 @@
 define <8 x i64> @vsrl_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vsrl_vx_v8i64:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vsrl_vx_v8i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t
 ; RV64-NEXT: ret
 %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
@@ -1185,7 +1185,7 @@
 define <8 x i64> @vsrl_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vi_v8i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <8 x i64> poison, i64 4, i32 0
@@ -1213,7 +1213,7 @@
 define <16 x i64> @vsrl_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vv_v16i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vsrl.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call <16 x i64> @llvm.vp.lshr.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
@@ -1235,13 +1235,13 @@
 define <16 x i64> @vsrl_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vsrl_vx_v16i64:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vsrl_vx_v16i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
 ; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t
 ; RV64-NEXT: ret
 %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
@@ -1273,7 +1273,7 @@
 define <16 x i64> @vsrl_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vi_v16i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <16 x i64> poison, i64 4, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll
@@ -9,7 +9,7 @@
 define <8 x i7> @vsub_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsub_vv_v8i7:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <8 x i7> @llvm.vp.sub.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
@@ -21,7 +21,7 @@
 define <2 x i8> @vsub_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsub_vv_v2i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <2 x i8> @llvm.vp.sub.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
@@ -43,7 +43,7 @@
 define <2 x i8> @vsub_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsub_vx_v2i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
@@ -71,7 +71,7 @@
 define <3 x i8> @vsub_vv_v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsub_vv_v3i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <3 x i8> @llvm.vp.sub.v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> %m, i32 %evl)
@@ -93,7 +93,7 @@
 define <3 x i8> @vsub_vx_v3i8(<3 x i8> %va, i8 %b, <3 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsub_vx_v3i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <3 x i8> poison, i8 %b, i32 0
@@ -121,7 +121,7 @@
 define <4 x i8> @vsub_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsub_vv_v4i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <4 x i8> @llvm.vp.sub.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
@@ -143,7 +143,7 @@
 define <4 x i8> @vsub_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsub_vx_v4i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
@@ -171,7 +171,7 @@
 define <8 x i8> @vsub_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsub_vv_v8i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <8 x i8> @llvm.vp.sub.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
@@ -193,7 +193,7 @@
 define <8 x i8> @vsub_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsub_vx_v8i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
@@ -221,7 +221,7 @@
 define <16 x i8> @vsub_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsub_vv_v16i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <16 x i8> @llvm.vp.sub.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
@@ -243,7 +243,7 @@
 define <16 x i8> @vsub_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsub_vx_v16i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
@@ -271,7 +271,7 @@
 define <2 x i16> @vsub_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsub_vv_v2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <2 x i16> @llvm.vp.sub.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
@@ -293,7 +293,7 @@
 define <2 x i16> @vsub_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsub_vx_v2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
@@ -321,7 +321,7 @@
 define <4 x i16> @vsub_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsub_vv_v4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <4 x i16> @llvm.vp.sub.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
@@ -343,7 +343,7 @@
 define <4 x i16> @vsub_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsub_vx_v4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
@@ -371,7 +371,7 @@
 define <8 x i16> @vsub_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsub_vv_v8i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <8 x i16> @llvm.vp.sub.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
@@ -393,7 +393,7 @@
 define <8 x i16> @vsub_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsub_vx_v8i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
@@ -421,7 +421,7 @@
 define <16 x i16> @vsub_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsub_vv_v16i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vsub.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call <16 x i16> @llvm.vp.sub.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
@@ -443,7 +443,7 @@
 define <16 x i16> @vsub_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsub_vx_v16i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
@@ -471,7 +471,7 @@
 define <2 x i32> @vsub_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsub_vv_v2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <2 x i32> @llvm.vp.sub.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
@@ -493,7 +493,7 @@
 define <2 x i32> @vsub_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsub_vx_v2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
@@ -521,7 +521,7 @@
 define <4 x i32> @vsub_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsub_vv_v4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <4 x i32> @llvm.vp.sub.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
@@ -543,7 +543,7 @@
 define <4 x i32> @vsub_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsub_vx_v4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
@@ -571,7 +571,7 @@
 define <8 x i32> @vsub_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsub_vv_v8i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vsub.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call <8 x i32> @llvm.vp.sub.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
@@ -593,7 +593,7 @@
 define <8 x i32> @vsub_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsub_vx_v8i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
@@ -621,7 +621,7 @@
 define <16 x i32> @vsub_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsub_vv_v16i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vsub.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call <16 x i32> @llvm.vp.sub.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
@@ -643,7 +643,7 @@
 define <16 x i32> @vsub_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsub_vx_v16i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
@@ -671,7 +671,7 @@
 define <2 x i64> @vsub_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsub_vv_v2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <2 x i64> @llvm.vp.sub.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
@@ -700,14 +700,14 @@
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
 ; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT: vsub.vv v8, v8, v9, v0.t
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vsub_vx_v2i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT: vsub.vx v8, v8, a0, v0.t
 ; RV64-NEXT: ret
 %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
@@ -749,7 +749,7 @@
 define <4 x i64> @vsub_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsub_vv_v4i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ;
CHECK-NEXT: vsub.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.vp.sub.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl) @@ -778,14 +778,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vsub.vv v8, v8, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vsub.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0 @@ -827,7 +827,7 @@ define <8 x i64> @vsub_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call <8 x i64> @llvm.vp.sub.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl) @@ -856,14 +856,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vsub.vv v8, v8, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vsub.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 @@ -905,7 +905,7 @@ define <16 x i64> @vsub_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v16i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call <16 x i64> @llvm.vp.sub.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl) @@ -934,14 +934,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vsub.vv v8, v8, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_v16i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsub.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll @@ -9,7 +9,7 @@ define <8 x i7> @vxor_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v8i7: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <8 x i7> @llvm.vp.xor.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl) @@ -21,7 +21,7 @@ define <2 x i8> @vxor_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: 
vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.vp.xor.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl) @@ -43,7 +43,7 @@ define <2 x i8> @vxor_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0 @@ -55,7 +55,7 @@ define <2 x i8> @vxor_vx_v2i8_commute(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_v2i8_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0 @@ -81,7 +81,7 @@ define <2 x i8> @vxor_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 7, i32 0 @@ -107,7 +107,7 @@ define <2 x i8> @vxor_vi_v2i8_1(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v2i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vnot.v v8, v8, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0 @@ -135,7 +135,7 @@ define <4 x i8> @vxor_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.xor.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl) @@ -157,7 +157,7 @@ define <4 x i8> @vxor_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0 @@ -183,7 +183,7 @@ define <4 x i8> @vxor_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 7, i32 0 @@ -209,7 +209,7 @@ define <4 x i8> @vxor_vi_v4i8_1(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v4i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vnot.v v8, v8, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 -1, i32 0 @@ -237,7 +237,7 @@ define <8 x i8> @vxor_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <8 x i8> @llvm.vp.xor.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl) @@ -259,7 +259,7 @@ define <8 x i8> @vxor_vx_v8i8(<8 x i8> %va, i8 
%b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 @@ -285,7 +285,7 @@ define <8 x i8> @vxor_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 7, i32 0 @@ -311,7 +311,7 @@ define <8 x i8> @vxor_vi_v8i8_1(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v8i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vnot.v v8, v8, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0 @@ -339,7 +339,7 @@ define <9 x i8> @vxor_vv_v9i8(<9 x i8> %va, <9 x i8> %b, <9 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v9i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> %b, <9 x i1> %m, i32 %evl) @@ -361,7 +361,7 @@ define <9 x i8> @vxor_vx_v9i8(<9 x i8> %va, i8 %b, <9 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_v9i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <9 x i8> poison, i8 %b, i32 0 @@ -387,7 +387,7 @@ define <9 x i8> @vxor_vi_v9i8(<9 x i8> %va, <9 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v9i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <9 x i8> poison, i8 7, i32 0 @@ -413,7 +413,7 @@ define <9 x i8> @vxor_vi_v9i8_1(<9 x i8> %va, <9 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v9i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vnot.v v8, v8, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <9 x i8> poison, i8 -1, i32 0 @@ -441,7 +441,7 @@ define <16 x i8> @vxor_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <16 x i8> @llvm.vp.xor.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl) @@ -463,7 +463,7 @@ define <16 x i8> @vxor_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0 @@ -489,7 +489,7 @@ define <16 x i8> @vxor_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t ; CHECK-NEXT: ret %elt.head = 
insertelement <16 x i8> poison, i8 7, i32 0 @@ -515,7 +515,7 @@ define <16 x i8> @vxor_vi_v16i8_1(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v16i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vnot.v v8, v8, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0 @@ -543,7 +543,7 @@ define <2 x i16> @vxor_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.vp.xor.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl) @@ -565,7 +565,7 @@ define <2 x i16> @vxor_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0 @@ -591,7 +591,7 @@ define <2 x i16> @vxor_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 7, i32 0 @@ -617,7 +617,7 @@ define <2 x i16> @vxor_vi_v2i16_1(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v2i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vnot.v v8, v8, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0 @@ -645,7 +645,7 @@ define <4 x i16> @vxor_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.vp.xor.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl) @@ -667,7 +667,7 @@ define <4 x i16> @vxor_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0 @@ -693,7 +693,7 @@ define <4 x i16> @vxor_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 7, i32 0 @@ -719,7 +719,7 @@ define <4 x i16> @vxor_vi_v4i16_1(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v4i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vnot.v v8, v8, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0 @@ -747,7 +747,7 @@ define <8 x i16> @vxor_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v8i16: 
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <8 x i16> @llvm.vp.xor.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
@@ -769,7 +769,7 @@
 define <8 x i16> @vxor_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vx_v8i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
@@ -795,7 +795,7 @@
 define <8 x i16> @vxor_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v8i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <8 x i16> poison, i16 7, i32 0
@@ -821,7 +821,7 @@
 define <8 x i16> @vxor_vi_v8i16_1(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v8i16_1:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vnot.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0
@@ -849,7 +849,7 @@
 define <16 x i16> @vxor_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vv_v16i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vxor.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call <16 x i16> @llvm.vp.xor.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
@@ -871,7 +871,7 @@
 define <16 x i16> @vxor_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vx_v16i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
@@ -897,7 +897,7 @@
 define <16 x i16> @vxor_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v16i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <16 x i16> poison, i16 7, i32 0
@@ -923,7 +923,7 @@
 define <16 x i16> @vxor_vi_v16i16_1(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v16i16_1:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vnot.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0
@@ -951,7 +951,7 @@
 define <2 x i32> @vxor_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vv_v2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <2 x i32> @llvm.vp.xor.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
@@ -973,7 +973,7 @@
 define <2 x i32> @vxor_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vx_v2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
@@ -999,7 +999,7 @@
 define <2 x i32> @vxor_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <2 x i32> poison, i32 7, i32 0
@@ -1025,7 +1025,7 @@
 define <2 x i32> @vxor_vi_v2i32_1(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v2i32_1:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vnot.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0
@@ -1053,7 +1053,7 @@
 define <4 x i32> @vxor_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vv_v4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
@@ -1075,7 +1075,7 @@
 define <4 x i32> @vxor_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vx_v4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
@@ -1101,7 +1101,7 @@
 define <4 x i32> @vxor_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <4 x i32> poison, i32 7, i32 0
@@ -1127,7 +1127,7 @@
 define <4 x i32> @vxor_vi_v4i32_1(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v4i32_1:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vnot.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0
@@ -1155,7 +1155,7 @@
 define <8 x i32> @vxor_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vv_v8i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vxor.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call <8 x i32> @llvm.vp.xor.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
@@ -1177,7 +1177,7 @@
 define <8 x i32> @vxor_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vx_v8i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
@@ -1203,7 +1203,7 @@
 define <8 x i32> @vxor_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v8i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <8 x i32> poison, i32 7, i32 0
@@ -1229,7 +1229,7 @@
 define <8 x i32> @vxor_vi_v8i32_1(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v8i32_1:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vnot.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0
@@ -1257,7 +1257,7 @@
 define <16 x i32> @vxor_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vv_v16i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vxor.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call <16 x i32> @llvm.vp.xor.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
@@ -1279,7 +1279,7 @@
 define <16 x i32> @vxor_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vx_v16i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
@@ -1305,7 +1305,7 @@
 define <16 x i32> @vxor_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v16i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <16 x i32> poison, i32 7, i32 0
@@ -1331,7 +1331,7 @@
 define <16 x i32> @vxor_vi_v16i32_1(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v16i32_1:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vnot.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0
@@ -1359,7 +1359,7 @@
 define <2 x i64> @vxor_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vv_v2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call <2 x i64> @llvm.vp.xor.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
@@ -1388,14 +1388,14 @@
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
 ; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT: vxor.vv v8, v8, v9, v0.t
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vxor_vx_v2i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT: vxor.vx v8, v8, a0, v0.t
 ; RV64-NEXT: ret
 %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
@@ -1435,7 +1435,7 @@
 define <2 x i64> @vxor_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <2 x i64> poison, i64 7, i32 0
@@ -1461,7 +1461,7 @@
 define <2 x i64> @vxor_vi_v2i64_1(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v2i64_1:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vnot.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0
@@ -1489,7 +1489,7 @@
 define <4 x i64> @vxor_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vv_v4i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vxor.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call <4 x i64> @llvm.vp.xor.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
@@ -1518,14 +1518,14 @@
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
 ; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT: vxor.vv v8, v8, v10, v0.t
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vxor_vx_v4i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT: vxor.vx v8, v8, a0, v0.t
 ; RV64-NEXT: ret
 %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
@@ -1565,7 +1565,7 @@
 define <4 x i64> @vxor_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v4i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <4 x i64> poison, i64 7, i32 0
@@ -1591,7 +1591,7 @@
 define <4 x i64> @vxor_vi_v4i64_1(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v4i64_1:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vnot.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0
@@ -1619,7 +1619,7 @@
 define <8 x i64> @vxor_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vv_v8i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vxor.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call <8 x i64> @llvm.vp.xor.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
@@ -1648,14 +1648,14 @@
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT: vxor.vv v8, v8, v12, v0.t
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vxor_vx_v8i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT: vxor.vx v8, v8, a0, v0.t
 ; RV64-NEXT: ret
 %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
@@ -1695,7 +1695,7 @@
 define <8 x i64> @vxor_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v8i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <8 x i64> poison, i64 7, i32 0
@@ -1721,7 +1721,7 @@
 define <8 x i64> @vxor_vi_v8i64_1(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v8i64_1:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vnot.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0
@@ -1749,7 +1749,7 @@
 define <16 x i64> @vxor_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vv_v16i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vxor.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call <16 x i64> @llvm.vp.xor.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
@@ -1778,14 +1778,14 @@
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
 ; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT: vxor.vv v8, v8, v16, v0.t
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vxor_vx_v16i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
 ; RV64-NEXT: vxor.vx v8, v8, a0, v0.t
 ; RV64-NEXT: ret
 %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
@@ -1825,7 +1825,7 @@
 define <16 x i64> @vxor_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v16i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <16 x i64> poison, i64 7, i32 0
@@ -1851,7 +1851,7 @@
 define <16 x i64> @vxor_vi_v16i64_1(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v16i64_1:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vnot.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll
@@ -9,7 +9,7 @@
 define <4 x i16> @vzext_v4i16_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vzext_v4i16_v4i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vzext.vf2 v9, v8, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v9
 ; CHECK-NEXT: ret
@@ -33,7 +33,7 @@
 define <4 x i32> @vzext_v4i32_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vzext_v4i32_v4i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vzext.vf4 v9, v8, v0.t
 ; CHECK-NEXT: vmv.v.v v8, v9
 ; CHECK-NEXT: ret
@@ -57,7 +57,7 @@
 define <4 x i64> @vzext_v4i64_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vzext_v4i64_v4i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vzext.vf8 v10, v8, v0.t
 ; CHECK-NEXT: vmv.v.v v8, v10
 ; CHECK-NEXT: ret
@@ -81,7 +81,7 @@
 define <4 x i32> @vzext_v4i32_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vzext_v4i32_v4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vzext.vf2 v9, v8, v0.t
 ; CHECK-NEXT: vmv.v.v v8, v9
 ; CHECK-NEXT: ret
@@ -105,7 +105,7 @@
 define <4 x i64> @vzext_v4i64_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vzext_v4i64_v4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vzext.vf4 v10, v8, v0.t
 ; CHECK-NEXT: vmv.v.v v8, v10
 ; CHECK-NEXT: ret
@@ -129,7 +129,7 @@
 define <4 x i64> @vzext_v4i64_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vzext_v4i64_v4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vzext.vf2 v10, v8, v0.t
 ; CHECK-NEXT: vmv.v.v v8, v10
 ; CHECK-NEXT: ret
@@ -164,14 +164,14 @@
 ; CHECK-NEXT: .LBB12_2:
 ; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
 ; CHECK-NEXT: vslidedown.vi v24, v8, 16
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT: li a1, 16
 ; CHECK-NEXT: vzext.vf2 v16, v24, v0.t
 ; CHECK-NEXT: bltu a0, a1, .LBB12_4
 ; CHECK-NEXT: # %bb.3:
 ; CHECK-NEXT: li a0, 16
 ; CHECK-NEXT: .LBB12_4:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v1
 ; CHECK-NEXT: vzext.vf2 v24, v8, v0.t
 ; CHECK-NEXT: vmv.v.v v8, v24
diff --git a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
@@ -11,13 +11,16 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a1, %hi(.LCPI0_0)
 ; CHECK-NEXT: flh ft0, %lo(.LCPI0_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.floor.nxv1f16( %va, %m, i32 %evl)
@@ -29,13 +32,14 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a1, %hi(.LCPI1_0)
 ; CHECK-NEXT: flh ft0, %lo(.LCPI1_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0
 ; CHECK-NEXT: fsrmi a0, 2
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement poison, i1 true, i32 0
@@ -51,13 +55,16 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a1, %hi(.LCPI2_0)
 ; CHECK-NEXT: flh ft0, %lo(.LCPI2_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.floor.nxv2f16( %va, %m, i32 %evl)
@@ -69,13 +76,14 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a1, %hi(.LCPI3_0)
 ; CHECK-NEXT: flh ft0, %lo(.LCPI3_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0
 ; CHECK-NEXT: fsrmi a0, 2
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement poison, i1 true, i32 0
@@ -91,13 +99,16 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a1, %hi(.LCPI4_0)
 ; CHECK-NEXT: flh ft0, %lo(.LCPI4_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.floor.nxv4f16( %va, %m, i32 %evl)
@@ -109,13 +120,14 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a1, %hi(.LCPI5_0)
 ; CHECK-NEXT: flh ft0, %lo(.LCPI5_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0
 ; CHECK-NEXT: fsrmi a0, 2
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement poison, i1 true, i32 0
@@ -129,17 +141,20 @@
 define @vp_floor_nxv8f16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_nxv8f16:
 ; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v10, v0
 ; CHECK-NEXT: lui a1, %hi(.LCPI6_0)
 ; CHECK-NEXT: flh ft0, %lo(.LCPI6_0)(a1)
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
 ; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.floor.nxv8f16( %va, %m, i32 %evl)
@@ -151,13 +166,14 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a1, %hi(.LCPI7_0)
 ; CHECK-NEXT: flh ft0, %lo(.LCPI7_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vfabs.v v10, v8
 ; CHECK-NEXT: vmflt.vf v0, v10, ft0
 ; CHECK-NEXT: fsrmi a0, 2
 ; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement poison, i1 true, i32 0
@@ -171,17 +187,20 @@
 define @vp_floor_nxv16f16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_nxv16f16:
 ; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v12, v0
 ; CHECK-NEXT: lui a1, %hi(.LCPI8_0)
 ; CHECK-NEXT: flh ft0, %lo(.LCPI8_0)(a1)
-; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu
 ; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.floor.nxv16f16( %va, %m, i32 %evl)
@@ -193,13 +212,14 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a1, %hi(.LCPI9_0)
 ; CHECK-NEXT: flh ft0, %lo(.LCPI9_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: vfabs.v v12, v8
 ; CHECK-NEXT: vmflt.vf v0, v12, ft0
 ; CHECK-NEXT: fsrmi a0, 2
 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement poison, i1 true, i32 0
@@ -213,17 +233,20 @@
 define @vp_floor_nxv32f16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_nxv32f16:
 ; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v16, v0
 ; CHECK-NEXT: lui a1, %hi(.LCPI10_0)
 ; CHECK-NEXT: flh ft0, %lo(.LCPI10_0)(a1)
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu
 ; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v16
 ; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.floor.nxv32f16( %va, %m, i32 %evl)
@@ -235,13 +258,14 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a1, %hi(.LCPI11_0)
 ; CHECK-NEXT: flh ft0, %lo(.LCPI11_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT: vfabs.v v16, v8
 ; CHECK-NEXT: vmflt.vf v0, v16, ft0
 ; CHECK-NEXT: fsrmi a0, 2
 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement poison, i1 true, i32 0
@@ -257,13 +281,16 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a1, %hi(.LCPI12_0)
 ; CHECK-NEXT: flw ft0, %lo(.LCPI12_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.floor.nxv1f32( %va, %m, i32 %evl)
@@ -275,13 +302,14 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a1, %hi(.LCPI13_0)
 ; CHECK-NEXT: flw ft0, %lo(.LCPI13_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0
 ; CHECK-NEXT: fsrmi a0, 2
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement poison, i1 true, i32 0
@@ -297,13 +325,16 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a1, %hi(.LCPI14_0)
 ; CHECK-NEXT: flw ft0, %lo(.LCPI14_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.floor.nxv2f32( %va, %m, i32 %evl)
@@ -315,13 +346,14 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a1, %hi(.LCPI15_0)
 ; CHECK-NEXT: flw ft0, %lo(.LCPI15_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0
 ; CHECK-NEXT: fsrmi a0, 2
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement poison, i1 true, i32 0
@@ -335,17 +367,20 @@
 define @vp_floor_nxv4f32( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_nxv4f32:
 ; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v10, v0
 ; CHECK-NEXT: lui a1, %hi(.LCPI16_0)
 ; CHECK-NEXT: flw ft0, %lo(.LCPI16_0)(a1)
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.floor.nxv4f32( %va, %m, i32 %evl)
@@ -357,13 +392,14 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a1, %hi(.LCPI17_0)
 ; CHECK-NEXT: flw ft0, %lo(.LCPI17_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vfabs.v v10, v8
 ; CHECK-NEXT: vmflt.vf v0, v10, ft0
 ; CHECK-NEXT: fsrmi a0, 2
 ; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement poison, i1 true, i32 0
@@ -377,17 +413,20 @@
 define @vp_floor_nxv8f32( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_nxv8f32:
 ; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v12, v0
 ; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
 ; CHECK-NEXT: flw ft0, %lo(.LCPI18_0)(a1)
-; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.floor.nxv8f32( %va, %m, i32 %evl)
@@ -399,13 +438,14 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a1, %hi(.LCPI19_0)
 ; CHECK-NEXT: flw ft0, %lo(.LCPI19_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vfabs.v v12, v8
 ; CHECK-NEXT: vmflt.vf v0, v12, ft0
 ; CHECK-NEXT: fsrmi a0, 2
 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement poison, i1 true, i32 0
@@ -419,17 +459,20 @@
 define @vp_floor_nxv16f32( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_nxv16f32:
 ; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v16, v0
 ; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
 ; CHECK-NEXT: flw ft0, %lo(.LCPI20_0)(a1)
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v16
 ; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.floor.nxv16f32( %va, %m, i32 %evl)
@@ -441,13 +484,14 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a1, %hi(.LCPI21_0)
 ; CHECK-NEXT: flw ft0, %lo(.LCPI21_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT: vfabs.v v16, v8
 ; CHECK-NEXT: vmflt.vf v0, v16, ft0
 ; CHECK-NEXT: fsrmi a0, 2
 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement poison, i1 true, i32 0
@@ -463,13 +507,16 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
 ; CHECK-NEXT: fld ft0, %lo(.LCPI22_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.floor.nxv1f64( %va, %m, i32 %evl)
@@ -481,13 +528,14 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a1, %hi(.LCPI23_0)
 ; CHECK-NEXT: fld ft0, %lo(.LCPI23_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0
 ; CHECK-NEXT: fsrmi a0, 2
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement poison, i1 true, i32 0
@@ -501,17 +549,20 @@
 define @vp_floor_nxv2f64( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_nxv2f64:
 ; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v10, v0
 ; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
 ; CHECK-NEXT: fld ft0, %lo(.LCPI24_0)(a1)
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.floor.nxv2f64( %va, %m, i32 %evl)
@@ -523,13 +574,14 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a1, %hi(.LCPI25_0)
 ; CHECK-NEXT: fld ft0, %lo(.LCPI25_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vfabs.v v10, v8
 ; CHECK-NEXT: vmflt.vf v0, v10, ft0
 ; CHECK-NEXT: fsrmi a0, 2
 ; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement poison, i1 true, i32 0
@@ -543,17 +595,20 @@
 define @vp_floor_nxv4f64( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_nxv4f64:
 ; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v12, v0
 ; CHECK-NEXT: lui a1, %hi(.LCPI26_0)
 ; CHECK-NEXT: fld ft0, %lo(.LCPI26_0)(a1)
-; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.floor.nxv4f64( %va, %m, i32 %evl)
@@ -565,13 +620,14 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a1, %hi(.LCPI27_0)
 ; CHECK-NEXT: fld ft0, %lo(.LCPI27_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vfabs.v v12, v8
 ; CHECK-NEXT: vmflt.vf v0, v12, ft0
 ; CHECK-NEXT: fsrmi a0, 2
 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement poison, i1 true, i32 0
@@ -585,17 +641,20 @@
 define @vp_floor_nxv7f64( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_nxv7f64:
 ; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v16, v0
 ; CHECK-NEXT: lui a1, %hi(.LCPI28_0)
 ; CHECK-NEXT: fld ft0, %lo(.LCPI28_0)(a1)
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v16
 ; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.floor.nxv7f64( %va, %m, i32 %evl)
@@ -607,13 +666,14 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a1, %hi(.LCPI29_0)
 ; CHECK-NEXT: fld ft0, %lo(.LCPI29_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vfabs.v v16, v8
 ; CHECK-NEXT: vmflt.vf v0, v16, ft0
 ; CHECK-NEXT: fsrmi a0, 2
 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement poison, i1 true, i32 0
@@ -627,17 +687,20 @@
 define @vp_floor_nxv8f64( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_nxv8f64:
 ; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v16, v0
 ; CHECK-NEXT: lui a1, %hi(.LCPI30_0)
 ; CHECK-NEXT: fld ft0, %lo(.LCPI30_0)(a1)
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v16
 ; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.floor.nxv8f64( %va, %m, i32 %evl)
@@ -649,13 +712,14 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a1, %hi(.LCPI31_0)
 ; CHECK-NEXT: fld ft0, %lo(.LCPI31_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vfabs.v v16, v8
 ; CHECK-NEXT: vmflt.vf v0, v16, ft0
 ; CHECK-NEXT: fsrmi a0, 2
 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement poison, i1 true, i32 0
@@ -670,58 +734,70 @@
 define @vp_floor_nxv16f64( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_nxv16f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
 ; CHECK-NEXT: li a2, 0
 ; CHECK-NEXT: csrr a1, vlenb
 ; CHECK-NEXT: srli a4, a1, 3
 ; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
 ; CHECK-NEXT: sub a3, a0, a1
-; CHECK-NEXT: vslidedown.vx v2, v0, a4
+; CHECK-NEXT: vslidedown.vx v25, v0, a4
 ; CHECK-NEXT: bltu a0, a3, .LBB32_2
 ; CHECK-NEXT: # %bb.1:
 ; CHECK-NEXT: mv a2, a3
 ; CHECK-NEXT: .LBB32_2:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a3, vlenb
-; CHECK-NEXT: slli a3, a3, 3
-; CHECK-NEXT: sub sp, sp, a3
 ; CHECK-NEXT: lui a3, %hi(.LCPI32_0)
 ; CHECK-NEXT: fld ft0, %lo(.LCPI32_0)(a3)
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v2
-; CHECK-NEXT: vfabs.v v24, v16, v0.t
-; CHECK-NEXT: vmflt.vf v2, v24, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vfabs.v v8, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT: fsrmi a2, 2
-; CHECK-NEXT: vmv1r.v v0, v2
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
-; CHECK-NEXT: addi a3, sp, 16
-; CHECK-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
 ; CHECK-NEXT: fsrm a2
-; CHECK-NEXT: addi a2, sp, 16
-; CHECK-NEXT: vl8re8.v v24, (a2) # Unknown-size Folded Reload
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
 ; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
 ; CHECK-NEXT: bltu a0, a1, .LBB32_4
 ; CHECK-NEXT: # %bb.3:
 ; CHECK-NEXT: mv a0, a1
 ; CHECK-NEXT: .LBB32_4:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT: vfabs.v v16, v8, v0.t
-; CHECK-NEXT: vmflt.vf v1, v16, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vmflt.vf v24, v16, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
 ; CHECK-NEXT: add sp, sp, a0
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -741,26 +817,28 @@
 ; CHECK-NEXT: lui a3, %hi(.LCPI33_0)
 ; CHECK-NEXT: fld ft0, %lo(.LCPI33_0)(a3)
 ; CHECK-NEXT: li a3, 0
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; CHECK-NEXT: vfabs.v v24, v8
 ; CHECK-NEXT: vmflt.vf v0, v24, ft0
 ; CHECK-NEXT: fsrmi a2, 2
 ; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
 ; CHECK-NEXT: fsrm a2
 ; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: sub a1, a0, a1
 ; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT: bltu a0, a1, .LBB33_4
 ; CHECK-NEXT: # %bb.3:
 ; CHECK-NEXT: mv a3, a1
 ; CHECK-NEXT: .LBB33_4:
-; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
 ; CHECK-NEXT: vfabs.v v24, v16
 ; CHECK-NEXT: vmflt.vf v0, v24, ft0
 ; CHECK-NEXT: fsrmi a0, 2
 ; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
 ; CHECK-NEXT: ret
 %head = insertelement poison, i1 true, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll
@@ -11,13 +11,14 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI0_0)
 ; CHECK-NEXT: flh ft0, %lo(.LCPI0_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0
 ; CHECK-NEXT: fsrmi a0, 4
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: fsrm a0
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT: ret
 %a = call @llvm.round.nxv1f16( %x)
@@ -30,13 +31,14 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT:
lui a0, %hi(.LCPI1_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI1_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.round.nxv2f16( %x) @@ -49,13 +51,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI2_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI2_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.round.nxv4f16( %x) @@ -68,13 +71,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI3_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI3_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.round.nxv8f16( %x) @@ -87,13 +91,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI4_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI4_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.round.nxv16f16( %x) @@ -106,13 +111,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI5_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI5_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.round.nxv32f16( %x) @@ -125,13 +131,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI6_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI6_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.round.nxv1f32( %x) @@ -144,13 +151,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI7_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI7_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; 
CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.round.nxv2f32( %x) @@ -163,13 +171,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI8_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI8_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.round.nxv4f32( %x) @@ -182,13 +191,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI9_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI9_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.round.nxv8f32( %x) @@ -201,13 +211,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI10_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI10_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.round.nxv16f32( %x) @@ -220,13 +231,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI11_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI11_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.round.nxv1f64( %x) @@ -239,13 +251,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI12_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI12_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.round.nxv2f64( %x) @@ -258,13 +271,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI13_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI13_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t 
; CHECK-NEXT: ret %a = call @llvm.round.nxv4f64( %x) @@ -277,13 +291,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI14_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI14_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.round.nxv8f64( %x) diff --git a/llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll @@ -11,13 +11,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI0_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI0_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.roundeven.nxv1f16( %x) @@ -30,13 +31,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI1_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI1_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.roundeven.nxv2f16( %x) @@ -49,13 +51,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI2_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI2_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.roundeven.nxv4f16( %x) @@ -68,13 +71,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI3_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI3_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.roundeven.nxv8f16( %x) @@ -87,13 +91,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI4_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI4_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t 
; CHECK-NEXT: ret %a = call @llvm.roundeven.nxv16f16( %x) @@ -106,13 +111,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI5_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI5_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft0 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.roundeven.nxv32f16( %x) @@ -125,13 +131,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI6_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI6_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.roundeven.nxv1f32( %x) @@ -144,13 +151,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI7_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI7_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.roundeven.nxv2f32( %x) @@ -163,13 +171,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI8_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI8_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.roundeven.nxv4f32( %x) @@ -182,13 +191,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI9_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI9_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.roundeven.nxv8f32( %x) @@ -201,13 +211,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI10_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI10_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft0 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.roundeven.nxv16f32( %x) @@ -220,13 +231,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI11_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI11_0)(a0) -; CHECK-NEXT: vsetvli 
a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.roundeven.nxv1f64( %x) @@ -239,13 +251,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI12_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI12_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.roundeven.nxv2f64( %x) @@ -258,13 +271,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI13_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI13_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.roundeven.nxv4f64( %x) @@ -277,13 +291,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI14_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI14_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft0 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.roundeven.nxv8f64( %x) diff --git a/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll @@ -9,11 +9,12 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI0_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI0_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.trunc.nxv1f16( %x) @@ -26,11 +27,12 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI1_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI1_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.trunc.nxv2f16( %x) @@ -43,11 +45,12 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI2_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI2_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfabs.v 
v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.trunc.nxv4f16( %x) @@ -60,11 +63,12 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI3_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI3_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t ; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.trunc.nxv8f16( %x) @@ -77,11 +81,12 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI4_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI4_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.trunc.nxv16f16( %x) @@ -94,11 +99,12 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI5_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI5_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v8, v0.t ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.trunc.nxv32f16( %x) @@ -111,11 +117,12 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI6_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI6_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.trunc.nxv1f32( %x) @@ -128,11 +135,12 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI7_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI7_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.trunc.nxv2f32( %x) @@ -145,11 +153,12 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI8_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI8_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t ; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.trunc.nxv4f32( %x) @@ -162,11 +171,12 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI9_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI9_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; 
CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.trunc.nxv8f32( %x) @@ -179,11 +189,12 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI10_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI10_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v8, v0.t ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.trunc.nxv16f32( %x) @@ -196,11 +207,12 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI11_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI11_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.trunc.nxv1f64( %x) @@ -213,11 +225,12 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI12_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI12_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t ; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.trunc.nxv2f64( %x) @@ -230,11 +243,12 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI13_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI13_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.trunc.nxv4f64( %x) @@ -247,11 +261,12 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI14_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI14_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v8, v0.t ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t ; CHECK-NEXT: ret %a = call @llvm.trunc.nxv8f64( %x) diff --git a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll --- a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll @@ -380,7 +380,7 @@ ; CHECK-NEXT: srli a1, a0, 3 ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: add a1, a0, a1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vslideup.vx v22, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-load-fp.ll b/llvm/test/CodeGen/RISCV/rvv/masked-load-fp.ll --- a/llvm/test/CodeGen/RISCV/rvv/masked-load-fp.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/masked-load-fp.ll @@ -5,7 +5,7 @@ define @masked_load_nxv1f16(* %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv1f16(* %a, i32 2, %mask, undef) @@ -16,7 +16,7 @@ define @masked_load_nxv1f32(* %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv1f32(* %a, i32 4, %mask, undef) @@ -27,7 +27,7 @@ define @masked_load_nxv1f64(* %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv1f64(* %a, i32 8, %mask, undef) @@ -38,7 +38,7 @@ define @masked_load_nxv2f16(* %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv2f16(* %a, i32 2, %mask, undef) @@ -49,7 +49,7 @@ define @masked_load_nxv2f32(* %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv2f32(* %a, i32 4, %mask, undef) @@ -60,7 +60,7 @@ define @masked_load_nxv2f64(* %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vle64.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv2f64(* %a, i32 8, %mask, undef) @@ -71,7 +71,7 @@ define @masked_load_nxv4f16(* %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv4f16(* %a, i32 2, %mask, undef) @@ -82,7 +82,7 @@ define @masked_load_nxv4f32(* %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv4f32(* %a, i32 4, %mask, undef) @@ -93,7 +93,7 @@ define @masked_load_nxv4f64(* %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; CHECK-NEXT: vle64.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv4f64(* %a, i32 8, %mask, undef) @@ -104,7 +104,7 @@ define @masked_load_nxv8f16(* %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv8f16(* %a, i32 2, %mask, undef) @@ -115,7 +115,7 @@ define @masked_load_nxv8f32(* %a, %mask) nounwind 
{ ; CHECK-LABEL: masked_load_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv8f32(* %a, i32 4, %mask, undef) @@ -126,7 +126,7 @@ define @masked_load_nxv8f64(* %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv8f64(* %a, i32 8, %mask, undef) @@ -137,7 +137,7 @@ define @masked_load_nxv16f16(* %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv16f16(* %a, i32 2, %mask, undef) @@ -148,7 +148,7 @@ define @masked_load_nxv16f32(* %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv16f32(* %a, i32 4, %mask, undef) @@ -159,7 +159,7 @@ define @masked_load_nxv32f16(* %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv32f16(* %a, i32 2, %mask, undef) diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll b/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll --- a/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll @@ -5,7 +5,7 @@ define @masked_load_nxv1i8(* %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv1i8(* %a, i32 1, %mask, undef) @@ -16,7 +16,7 @@ define @masked_load_nxv1i16(* %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv1i16(* %a, i32 2, %mask, undef) @@ -27,7 +27,7 @@ define @masked_load_nxv1i32(* %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv1i32(* %a, i32 4, %mask, undef) @@ -38,7 +38,7 @@ define @masked_load_nxv1i64(* %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv1i64(* %a, i32 8, %mask, undef) @@ -49,7 +49,7 @@ define @masked_load_nxv2i8(* %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call 
@llvm.masked.load.nxv2i8(* %a, i32 1, %mask, undef) @@ -60,7 +60,7 @@ define @masked_load_nxv2i16(* %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv2i16(* %a, i32 2, %mask, undef) @@ -71,7 +71,7 @@ define @masked_load_nxv2i32(* %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv2i32(* %a, i32 4, %mask, undef) @@ -82,7 +82,7 @@ define @masked_load_nxv2i64(* %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vle64.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv2i64(* %a, i32 8, %mask, undef) @@ -93,7 +93,7 @@ define @masked_load_nxv4i8(* %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv4i8(* %a, i32 1, %mask, undef) @@ -104,7 +104,7 @@ define @masked_load_nxv4i16(* %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv4i16(* %a, i32 2, %mask, undef) @@ -115,7 +115,7 @@ define @masked_load_nxv4i32(* %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv4i32(* %a, i32 4, %mask, undef) @@ -126,7 +126,7 @@ define @masked_load_nxv4i64(* %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; CHECK-NEXT: vle64.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv4i64(* %a, i32 8, %mask, undef) @@ -137,7 +137,7 @@ define @masked_load_nxv8i8(* %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv8i8(* %a, i32 1, %mask, undef) @@ -148,7 +148,7 @@ define @masked_load_nxv8i16(* %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv8i16(* %a, i32 2, %mask, undef) @@ -159,7 +159,7 @@ define @masked_load_nxv8i32(* %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv8i32(* %a, i32 4, %mask, undef) @@ -170,7 +170,7 @@ define @masked_load_nxv8i64(* %a, %mask) 
nounwind { ; CHECK-LABEL: masked_load_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv8i64(* %a, i32 8, %mask, undef) @@ -181,7 +181,7 @@ define @masked_load_nxv16i8(* %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv16i8(* %a, i32 1, %mask, undef) @@ -192,7 +192,7 @@ define @masked_load_nxv16i16(* %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv16i16(* %a, i32 2, %mask, undef) @@ -203,7 +203,7 @@ define @masked_load_nxv16i32(* %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv16i32(* %a, i32 4, %mask, undef) @@ -214,7 +214,7 @@ define @masked_load_nxv32i8(* %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv32i8(* %a, i32 1, %mask, undef) @@ -225,7 +225,7 @@ define @masked_load_nxv32i16(* %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv32i16(* %a, i32 2, %mask, undef) @@ -236,7 +236,7 @@ define @masked_load_nxv64i8(* %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0), v0.t ; CHECK-NEXT: ret %load = call @llvm.masked.load.nxv64i8(* %a, i32 1, %mask, undef) diff --git a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll @@ -11,13 +11,16 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI0_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI0_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfabs.v v9, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.round.nxv1f16( %va, %m, i32 %evl) @@ -29,13 +32,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI1_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI1_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: 
vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -51,13 +55,16 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI2_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI2_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfabs.v v9, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.round.nxv2f16( %va, %m, i32 %evl) @@ -69,13 +76,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI3_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI3_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -91,13 +99,16 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI4_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI4_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.round.nxv4f16( %va, %m, i32 %evl) @@ -109,13 +120,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI5_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI5_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -129,17 +141,20 @@ define @vp_round_nxv8f16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv8f16: ; CHECK: # %bb.0: +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: lui a1, %hi(.LCPI6_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI6_0)(a1) -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfabs.v v12, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret %v = call 
@llvm.vp.round.nxv8f16( %va, %m, i32 %evl) @@ -151,13 +166,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI7_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI7_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -171,17 +187,20 @@ define @vp_round_nxv16f16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv16f16: ; CHECK: # %bb.0: +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: lui a1, %hi(.LCPI8_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI8_0)(a1) -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfabs.v v16, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.round.nxv16f16( %va, %m, i32 %evl) @@ -193,13 +212,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI9_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI9_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -213,17 +233,20 @@ define @vp_round_nxv32f16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv32f16: ; CHECK: # %bb.0: +; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: lui a1, %hi(.LCPI10_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI10_0)(a1) -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfabs.v v24, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.round.nxv32f16( %va, %m, i32 %evl) @@ -235,13 +258,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI11_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI11_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -257,13 +281,16 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, 
%hi(.LCPI12_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI12_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfabs.v v9, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.round.nxv1f32( %va, %m, i32 %evl) @@ -275,13 +302,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI13_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI13_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -297,13 +325,16 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI14_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI14_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.round.nxv2f32( %va, %m, i32 %evl) @@ -315,13 +346,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI15_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI15_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -335,17 +367,20 @@ define @vp_round_nxv4f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv4f32: ; CHECK: # %bb.0: +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: lui a1, %hi(.LCPI16_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI16_0)(a1) -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfabs.v v12, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.round.nxv4f32( %va, %m, i32 %evl) @@ -357,13 +392,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI17_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI17_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfabs.v v10, 
v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -377,17 +413,20 @@ define @vp_round_nxv8f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv8f32: ; CHECK: # %bb.0: +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: lui a1, %hi(.LCPI18_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI18_0)(a1) -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfabs.v v16, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.round.nxv8f32( %va, %m, i32 %evl) @@ -399,13 +438,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI19_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI19_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -419,17 +459,20 @@ define @vp_round_nxv16f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv16f32: ; CHECK: # %bb.0: +; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: lui a1, %hi(.LCPI20_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI20_0)(a1) -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfabs.v v24, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.round.nxv16f32( %va, %m, i32 %evl) @@ -441,13 +484,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI21_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI21_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -463,13 +507,16 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI22_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI22_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t ; 
CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.round.nxv1f64( %va, %m, i32 %evl) @@ -481,13 +528,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI23_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI23_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -501,17 +549,20 @@ define @vp_round_nxv2f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv2f64: ; CHECK: # %bb.0: +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: lui a1, %hi(.LCPI24_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI24_0)(a1) -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfabs.v v12, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.round.nxv2f64( %va, %m, i32 %evl) @@ -523,13 +574,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI25_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI25_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -543,17 +595,20 @@ define @vp_round_nxv4f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv4f64: ; CHECK: # %bb.0: +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: lui a1, %hi(.LCPI26_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI26_0)(a1) -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfabs.v v16, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.round.nxv4f64( %va, %m, i32 %evl) @@ -565,13 +620,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI27_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI27_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, 
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
@@ -585,17 +641,20 @@
define <vscale x 7 x double> @vp_round_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv7f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI28_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI28_0)(a1)
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 7 x double> @llvm.vp.round.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
@@ -607,13 +666,14 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI29_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI29_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v8
; CHECK-NEXT: vmflt.vf v0, v16, ft0
; CHECK-NEXT: fsrmi a0, 4
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
@@ -627,17 +687,20 @@
define <vscale x 8 x double> @vp_round_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv8f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI30_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI30_0)(a1)
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x double> @llvm.vp.round.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
@@ -649,13 +712,14 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI31_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI31_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v8
; CHECK-NEXT: vmflt.vf v0, v16, ft0
; CHECK-NEXT: fsrmi a0, 4
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
@@ -670,58 +734,70 @@
define <vscale x 16 x double> @vp_round_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: li a2, 0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a4, a1, 3
; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
; CHECK-NEXT: sub a3, a0, a1
-; CHECK-NEXT: vslidedown.vx v2, v0, a4
+; CHECK-NEXT: vslidedown.vx v25, v0, a4
; CHECK-NEXT: bltu a0, a3, .LBB32_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a3
; CHECK-NEXT: .LBB32_2:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a3, vlenb
-; CHECK-NEXT: slli a3, a3, 3
-; CHECK-NEXT: sub sp, sp, a3
; CHECK-NEXT: lui a3, %hi(.LCPI32_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI32_0)(a3)
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v2
-; CHECK-NEXT: vfabs.v v24, v16, v0.t
-; CHECK-NEXT: vmflt.vf v2, v24, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vfabs.v v8, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t
; CHECK-NEXT: fsrmi a2, 4
-; CHECK-NEXT: vmv1r.v v0, v2
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
-; CHECK-NEXT: addi a3, sp, 16
-; CHECK-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
; CHECK-NEXT: fsrm a2
-; CHECK-NEXT: addi a2, sp, 16
-; CHECK-NEXT: vl8re8.v v24, (a2) # Unknown-size Folded Reload
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: bltu a0, a1, .LBB32_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB32_4:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfabs.v v16, v8, v0.t
-; CHECK-NEXT: vmflt.vf v1, v16, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vmflt.vf v24, v16, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
@@ -741,26 +817,28 @@
; CHECK-NEXT: lui a3, %hi(.LCPI33_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI33_0)(a3)
; CHECK-NEXT: li a3, 0
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8
; CHECK-NEXT: vmflt.vf v0, v24, ft0
; CHECK-NEXT: fsrmi a2, 4
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a2
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: sub a1, a0, a1
; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB33_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a3, a1
; CHECK-NEXT: .LBB33_4:
-; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v16
; CHECK-NEXT: vmflt.vf v0, v24, ft0
; CHECK-NEXT: fsrmi a0, 4
; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
@@ -11,13 +11,16 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI0_0)
; CHECK-NEXT: flh ft0, %lo(.LCPI0_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x half> @llvm.vp.roundeven.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
@@ -29,13 +32,14 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI1_0)
; CHECK-NEXT: flh ft0, %lo(.LCPI1_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
@@ -51,13 +55,16 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI2_0)
; CHECK-NEXT: flh ft0, %lo(.LCPI2_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x half> @llvm.vp.roundeven.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
@@ -69,13 +76,14 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI3_0)
; CHECK-NEXT: flh ft0, %lo(.LCPI3_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
@@ -91,13 +99,16 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI4_0)
; CHECK-NEXT: flh ft0, %lo(.LCPI4_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x half> @llvm.vp.roundeven.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
@@ -109,13 +120,14 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI5_0)
; CHECK-NEXT: flh ft0, %lo(.LCPI5_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
@@ -129,17 +141,20 @@
define <vscale x 8 x half> @vp_roundeven_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv8f16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI6_0)
; CHECK-NEXT: flh ft0, %lo(.LCPI6_0)(a1)
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x half> @llvm.vp.roundeven.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
@@ -151,13 +166,14 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI7_0)
; CHECK-NEXT: flh ft0, %lo(.LCPI7_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfabs.v v10, v8
; CHECK-NEXT: vmflt.vf v0, v10, ft0
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
@@ -171,17 +187,20 @@
define <vscale x 16 x half> @vp_roundeven_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv16f16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI8_0)
; CHECK-NEXT: flh ft0, %lo(.LCPI8_0)(a1)
-; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x half> @llvm.vp.roundeven.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
@@ -193,13 +212,14 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI9_0)
; CHECK-NEXT: flh ft0, %lo(.LCPI9_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfabs.v v12, v8
; CHECK-NEXT: vmflt.vf v0, v12, ft0
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
@@ -213,17 +233,20 @@
define <vscale x 32 x half> @vp_roundeven_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv32f16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI10_0)
; CHECK-NEXT: flh ft0, %lo(.LCPI10_0)(a1)
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vp.roundeven.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
@@ -235,13 +258,14 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI11_0)
; CHECK-NEXT: flh ft0, %lo(.LCPI11_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v8
; CHECK-NEXT: vmflt.vf v0, v16, ft0
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
@@ -257,13 +281,16 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI12_0)
; CHECK-NEXT: flw ft0, %lo(.LCPI12_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x float> @llvm.vp.roundeven.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
@@ -275,13 +302,14 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI13_0)
; CHECK-NEXT: flw ft0, %lo(.LCPI13_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
@@ -297,13 +325,16 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI14_0)
; CHECK-NEXT: flw ft0, %lo(.LCPI14_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x float> @llvm.vp.roundeven.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
@@ -315,13 +346,14 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI15_0)
; CHECK-NEXT: flw ft0, %lo(.LCPI15_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
@@ -335,17 +367,20 @@
define <vscale x 4 x float> @vp_roundeven_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv4f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI16_0)
; CHECK-NEXT: flw ft0, %lo(.LCPI16_0)(a1)
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x float> @llvm.vp.roundeven.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl)
@@ -357,13 +392,14 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI17_0)
; CHECK-NEXT: flw ft0, %lo(.LCPI17_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v10, v8
; CHECK-NEXT: vmflt.vf v0, v10, ft0
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
@@ -377,17 +413,20 @@
define <vscale x 8 x float> @vp_roundeven_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv8f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: flw ft0, %lo(.LCPI18_0)(a1)
-; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x float> @llvm.vp.roundeven.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl)
@@ -399,13 +438,14 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI19_0)
; CHECK-NEXT: flw ft0, %lo(.LCPI19_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v12, v8
; CHECK-NEXT: vmflt.vf v0, v12, ft0
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
@@ -419,17 +459,20 @@
define <vscale x 16 x float> @vp_roundeven_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv16f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: flw ft0, %lo(.LCPI20_0)(a1)
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x float> @llvm.vp.roundeven.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl)
@@ -441,13 +484,14 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI21_0)
; CHECK-NEXT: flw ft0, %lo(.LCPI21_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v8
; CHECK-NEXT: vmflt.vf v0, v16, ft0
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
@@ -463,13 +507,16 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI22_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x double> @llvm.vp.roundeven.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
@@ -481,13 +528,14 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI23_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI23_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
@@ -501,17 +549,20 @@
define <vscale x 2 x double> @vp_roundeven_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv2f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI24_0)(a1)
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x double> @llvm.vp.roundeven.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
@@ -523,13 +574,14 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI25_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI25_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfabs.v v10, v8
; CHECK-NEXT: vmflt.vf v0, v10, ft0
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
@@ -543,17 +595,20 @@
define <vscale x 4 x double> @vp_roundeven_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv4f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI26_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI26_0)(a1)
-; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x double> @llvm.vp.roundeven.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
@@ -565,13 +620,14 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI27_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI27_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfabs.v v12, v8
; CHECK-NEXT: vmflt.vf v0, v12, ft0
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
@@ -585,17 +641,20 @@
define <vscale x 7 x double> @vp_roundeven_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv7f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI28_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI28_0)(a1)
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 7 x double> @llvm.vp.roundeven.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
@@ -607,13 +666,14 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI29_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI29_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v8
; CHECK-NEXT: vmflt.vf v0, v16, ft0
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
@@ -627,17 +687,20 @@
define <vscale x 8 x double> @vp_roundeven_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv8f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI30_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI30_0)(a1)
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x double> @llvm.vp.roundeven.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
@@ -649,13 +712,14 @@
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI31_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI31_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v8
; CHECK-NEXT: vmflt.vf v0, v16, ft0
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
@@ -670,58 +734,70 @@
define <vscale x 16 x double> @vp_roundeven_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: li a2, 0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a4, a1, 3
; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
; CHECK-NEXT: sub a3, a0, a1
-; CHECK-NEXT: vslidedown.vx v2, v0, a4
+; CHECK-NEXT: vslidedown.vx v25, v0, a4
; CHECK-NEXT: bltu a0, a3, .LBB32_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a3
; CHECK-NEXT: .LBB32_2:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a3, vlenb
-; CHECK-NEXT: slli a3, a3, 3
-; CHECK-NEXT: sub sp, sp, a3
; CHECK-NEXT: lui a3, %hi(.LCPI32_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI32_0)(a3)
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v2
-; CHECK-NEXT: vfabs.v v24, v16, v0.t
-; CHECK-NEXT: vmflt.vf v2, v24, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vfabs.v v8, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t
; CHECK-NEXT: fsrmi a2, 0
-; CHECK-NEXT: vmv1r.v v0, v2
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
-; CHECK-NEXT: addi a3, sp, 16
-; CHECK-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
; CHECK-NEXT: fsrm a2
-; CHECK-NEXT: addi a2, sp, 16
-; CHECK-NEXT: vl8re8.v v24, (a2) # Unknown-size Folded Reload
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: bltu a0, a1, .LBB32_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB32_4:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfabs.v v16, v8, v0.t
-; CHECK-NEXT: vmflt.vf v1, v16, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vmflt.vf v24, v16, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
@@ -741,26 +817,28 @@
; CHECK-NEXT: lui a3, %hi(.LCPI33_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI33_0)(a3)
; CHECK-NEXT: li a3, 0
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8
; CHECK-NEXT: vmflt.vf v0, v24, ft0
; CHECK-NEXT: fsrmi a2, 0
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a2
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: sub a1, a0, a1
; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB33_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a3, a1
; CHECK-NEXT: .LBB33_4:
-; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v16
; CHECK-NEXT: vmflt.vf v0, v24, ft0
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
--- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
@@ -2955,7 +2955,7 @@
; CHECK-NEXT: .LBB46_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a1, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
@@ -2995,7 +2995,7 @@
; CHECK-NEXT: .LBB47_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a1, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
@@ -3037,7 +3037,7 @@
; CHECK-NEXT: .LBB48_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vadd.vv v9, v8, v9, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v9, (a0)
@@ -3077,7 +3077,7 @@
; CHECK-NEXT: .LBB49_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a1, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
@@ -3115,7 +3115,7 @@
; CHECK-NEXT: .LBB50_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a1, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
@@ -3155,7 +3155,7 @@
; CHECK-NEXT: .LBB51_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a1, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
@@ -3195,7 +3195,7 @@
; CHECK-NEXT: .LBB52_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vsrl.vx v8, v8, a1, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
@@ -3235,7 +3235,7 @@
; CHECK-NEXT: .LBB53_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a1, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
@@ -3275,7 +3275,7 @@
; CHECK-NEXT: .LBB54_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
@@ -3315,7 +3315,7 @@
; CHECK-NEXT: .LBB55_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
@@ -3353,7 +3353,7 @@
; CHECK-NEXT: .LBB56_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
@@ -3393,7 +3393,7 @@
; CHECK-NEXT: .LBB57_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
@@ -3433,7 +3433,7 @@
; CHECK-NEXT: .LBB58_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
@@ -3473,7 +3473,7 @@
; CHECK-NEXT: .LBB59_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
@@ -3513,7 +3513,7 @@
; CHECK-NEXT: .LBB60_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vdivu.vx v8, v8, a1, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
@@ -3553,7 +3553,7 @@
; CHECK-NEXT: .LBB61_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vdiv.vx v8, v8, a1, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
@@ -3593,7 +3593,7 @@
; CHECK-NEXT: .LBB62_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vremu.vx v8, v8, a1, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
@@ -3633,7 +3633,7 @@
; CHECK-NEXT: .LBB63_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a1, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
@@ -3674,7 +3674,7 @@
; CHECK-NEXT: .LBB64_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vrem.vv v9, v8, v9, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v9, (a0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-load-store-intrinsics.ll b/llvm/test/CodeGen/RISCV/rvv/strided-load-store-intrinsics.ll
--- a/llvm/test/CodeGen/RISCV/rvv/strided-load-store-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/strided-load-store-intrinsics.ll
@@ -11,7 +11,7 @@
; CHECK-LABEL: strided_load_i8:
; CHECK: # %bb.0:
; CHECK-NEXT: li a2, 32
-; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%res = call <32 x i8> @llvm.riscv.masked.strided.load.v32i8.p0.i64(<32 x i8> undef, ptr %p, i64 %stride, <32 x i1> %m)
@@ -21,7 +21,7 @@
define <2 x i64> @strided_load_i64(ptr %p, i64 %stride, <2 x i1> %m) {
; CHECK-LABEL: strided_load_i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%res = call <2 x i64> @llvm.riscv.masked.strided.load.v2i64.p0.i64(<2 x i64> undef, ptr %p, i64 %stride, <2 x i1> %m)
@@ -32,7 +32,7 @@
; CHECK-LABEL: strided_load_i8_splat:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 32
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vlse8.v v8, (a0), zero, v0.t
; CHECK-NEXT: ret
%res = call <32 x i8> @llvm.riscv.masked.strided.load.v32i8.p0.i64(<32 x i8> undef, ptr %p, i64 0, <32 x i1> %m)
@@ -44,7 +44,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: li a2, -1
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t
; CHECK-NEXT: ret
%res = call <32 x i8> @llvm.riscv.masked.strided.load.v32i8.p0.i64(<32 x i8> undef, ptr %p, i64 -1, <32 x i1> %m)
@@ -56,7 +56,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: li a2, 1
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t
; CHECK-NEXT: ret
%res = call <32 x i8> @llvm.riscv.masked.strided.load.v32i8.p0.i64(<32 x i8> undef, ptr %p, i64 1, <32 x i1> %m)
@@ -117,7 +117,7 @@
define <vscale x 1 x i64> @strided_load_vscale_i64(ptr %p, i64 %stride, <vscale x 1 x i1> %m) {
; CHECK-LABEL: strided_load_vscale_i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli a2, zero, e64, m1, ta, ma
; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%res = call <vscale x 1 x i64> @llvm.riscv.masked.strided.load.nxv1i64.p0.i64(<vscale x 1 x i64> undef, ptr %p, i64 %stride, <vscale x 1 x i1> %m)
diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
--- a/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
@@ -9,13 +9,13 @@
define <vscale x 1 x i8> @strided_vpload_nxv1i8_i8(i8* %ptr, i8 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv1i8_i8:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv1i8_i8:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0i8.i8(i8* %ptr, i8 %stride, <vscale x 1 x i1> %m, i32 %evl)
@@ -27,13 +27,13 @@
define <vscale x 1 x i8> @strided_vpload_nxv1i8_i16(i8* %ptr, i16 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv1i8_i16:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv1i8_i16:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0i8.i16(i8* %ptr, i16 %stride, <vscale x 1 x i1> %m, i32 %evl)
@@ -45,13 +45,13 @@
define <vscale x 1 x i8> @strided_vpload_nxv1i8_i64(i8* %ptr, i64 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv1i8_i64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a3, e8, mf8, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a3, e8, mf8, ta, ma
; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv1i8_i64:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0i8.i64(i8* %ptr, i64 %stride, <vscale x 1 x i1> %m, i32 %evl)
@@ -81,13 +81,13 @@
define <vscale x 1 x i8> @strided_vpload_nxv1i8(i8* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv1i8:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv1i8:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0i8.i32(i8* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
@@ -117,13 +117,13 @@
define <vscale x 2 x i8> @strided_vpload_nxv2i8(i8* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv2i8:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv2i8:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 2 x i8> @llvm.experimental.vp.strided.load.nxv2i8.p0i8.i32(i8* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
@@ -135,13 +135,13 @@
define <vscale x 4 x i8> @strided_vpload_nxv4i8(i8* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv4i8:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv4i8:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 4 x i8> @llvm.experimental.vp.strided.load.nxv4i8.p0i8.i32(i8* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
@@ -153,13 +153,13 @@
define <vscale x 8 x i8> @strided_vpload_nxv8i8(i8* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv8i8:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e8, m1, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv8i8:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e8, m1, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 8 x i8> @llvm.experimental.vp.strided.load.nxv8i8.p0i8.i32(i8* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
@@ -189,13 +189,13 @@
define <vscale x 1 x i16> @strided_vpload_nxv1i16(i16* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv1i16:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv1i16:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 1 x i16> @llvm.experimental.vp.strided.load.nxv1i16.p0i16.i32(i16* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
@@ -207,13 +207,13 @@
define <vscale x 2 x i16> @strided_vpload_nxv2i16(i16* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv2i16:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv2i16:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 2 x i16> @llvm.experimental.vp.strided.load.nxv2i16.p0i16.i32(i16* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
@@ -243,13 +243,13 @@
define <vscale x 4 x i16> @strided_vpload_nxv4i16(i16* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv4i16:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv4i16:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 4 x i16> @llvm.experimental.vp.strided.load.nxv4i16.p0i16.i32(i16* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
@@ -261,13 +261,13 @@
define <vscale x 8 x i16> @strided_vpload_nxv8i16(i16* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv8i16:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m2, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv8i16:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m2, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 8 x i16> @llvm.experimental.vp.strided.load.nxv8i16.p0i16.i32(i16* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
@@ -279,13 +279,13 @@
define <vscale x 1 x i32> @strided_vpload_nxv1i32(i32* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv1i32:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv1i32:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 1 x i32> @llvm.experimental.vp.strided.load.nxv1i32.p0i32.i32(i32* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
@@ -297,13 +297,13 @@
define <vscale x 2 x i32> @strided_vpload_nxv2i32(i32* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv2i32:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv2i32:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 2 x i32> @llvm.experimental.vp.strided.load.nxv2i32.p0i32.i32(i32* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
@@ -315,13 +315,13 @@
define <vscale x 4 x i32> @strided_vpload_nxv4i32(i32* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv4i32:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv4i32:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0i32.i32(i32* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
@@ -351,13 +351,13 @@
define <vscale x 8 x i32> @strided_vpload_nxv8i32(i32* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv8i32:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv8i32:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m4, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 8 x i32> @llvm.experimental.vp.strided.load.nxv8i32.p0i32.i32(i32* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
@@ -369,13 +369,13 @@
define <vscale x 1 x i64> @strided_vpload_nxv1i64(i64* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv1i64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv1i64:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 1 x i64> @llvm.experimental.vp.strided.load.nxv1i64.p0i64.i32(i64* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
@@ -405,13 +405,13 @@
define <vscale x 2 x i64> @strided_vpload_nxv2i64(i64* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv2i64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv2i64:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0i64.i32(i64* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
@@ -423,13 +423,13 @@
define <vscale x 4 x i64> @strided_vpload_nxv4i64(i64* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv4i64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv4i64:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 4 x i64> @llvm.experimental.vp.strided.load.nxv4i64.p0i64.i32(i64* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
@@ -441,13 +441,13 @@
define <vscale x 8 x i64> @strided_vpload_nxv8i64(i64* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv8i64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv8i64:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 8 x i64> @llvm.experimental.vp.strided.load.nxv8i64.p0i64.i32(i64* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
@@ -459,13 +459,13 @@
define <vscale x 1 x half> @strided_vpload_nxv1f16(half* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv1f16:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv1f16:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 1 x half> @llvm.experimental.vp.strided.load.nxv1f16.p0f16.i32(half* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
@@ -477,13 +477,13 @@
define <vscale x 2 x half> @strided_vpload_nxv2f16(half* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv2f16:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv2f16:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 2 x half> @llvm.experimental.vp.strided.load.nxv2f16.p0f16.i32(half* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
@@ -513,13 +513,13 @@
define <vscale x 4 x half> @strided_vpload_nxv4f16(half* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv4f16:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv4f16:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 4 x half> @llvm.experimental.vp.strided.load.nxv4f16.p0f16.i32(half* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
@@ -531,13 +531,13 @@
define <vscale x 8 x half> @strided_vpload_nxv8f16(half* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv8f16:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m2, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_nxv8f16: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret %load = call @llvm.experimental.vp.strided.load.nxv8f16.p0f16.i32(half* %ptr, i32 signext %stride, %m, i32 %evl) @@ -549,13 +549,13 @@ define @strided_vpload_nxv1f32(float* %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_nxv1f32: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_nxv1f32: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret %load = call @llvm.experimental.vp.strided.load.nxv1f32.p0f32.i32(float* %ptr, i32 signext %stride, %m, i32 %evl) @@ -567,13 +567,13 @@ define @strided_vpload_nxv2f32(float* %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_nxv2f32: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_nxv2f32: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret %load = call @llvm.experimental.vp.strided.load.nxv2f32.p0f32.i32(float* %ptr, i32 signext %stride, %m, i32 %evl) @@ -585,13 +585,13 @@ define @strided_vpload_nxv4f32(float* %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_nxv4f32: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_nxv4f32: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret %load = call @llvm.experimental.vp.strided.load.nxv4f32.p0f32.i32(float* %ptr, i32 signext %stride, %m, i32 %evl) @@ -603,13 +603,13 @@ define @strided_vpload_nxv8f32(float* %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_nxv8f32: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m4, ta, ma ; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_nxv8f32: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m4, ta, ma ; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret %load = call @llvm.experimental.vp.strided.load.nxv8f32.p0f32.i32(float* %ptr, i32 signext %stride, %m, i32 %evl) @@ -639,13 +639,13 @@ define @strided_vpload_nxv1f64(double* %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_nxv1f64: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; 
CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_nxv1f64: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret %load = call @llvm.experimental.vp.strided.load.nxv1f64.p0f64.i32(double* %ptr, i32 signext %stride, %m, i32 %evl) @@ -657,13 +657,13 @@ define @strided_vpload_nxv2f64(double* %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_nxv2f64: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_nxv2f64: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret %load = call @llvm.experimental.vp.strided.load.nxv2f64.p0f64.i32(double* %ptr, i32 signext %stride, %m, i32 %evl) @@ -675,13 +675,13 @@ define @strided_vpload_nxv4f64(double* %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_nxv4f64: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_nxv4f64: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret %load = call @llvm.experimental.vp.strided.load.nxv4f64.p0f64.i32(double* %ptr, i32 signext %stride, %m, i32 %evl) @@ -711,13 +711,13 @@ define @strided_vpload_nxv8f64(double* %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_nxv8f64: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_nxv8f64: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret %load = call @llvm.experimental.vp.strided.load.nxv8f64.p0f64.i32(double* %ptr, i32 signext %stride, %m, i32 %evl) @@ -728,13 +728,13 @@ define @strided_vpload_nxv3f64(double* %ptr, i32 signext %stride, %mask, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_nxv3f64: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_nxv3f64: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret %v = call @llvm.experimental.vp.strided.load.nxv3f64.p0f64.i32(double* %ptr, i32 %stride, %mask, i32 %evl) @@ -782,9 +782,9 @@ ; CHECK-RV32-NEXT: .LBB42_4: ; CHECK-RV32-NEXT: mul a4, a3, a1 ; CHECK-RV32-NEXT: add a4, a0, a4 -; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-RV32-NEXT: vlse64.v 
v16, (a4), a1, v0.t -; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; CHECK-RV32-NEXT: vmv1r.v v0, v8 ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret @@ -808,9 +808,9 @@ ; CHECK-RV64-NEXT: .LBB42_4: ; CHECK-RV64-NEXT: mul a4, a2, a1 ; CHECK-RV64-NEXT: add a4, a0, a4 -; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; CHECK-RV64-NEXT: vlse64.v v16, (a4), a1, v0.t -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-RV64-NEXT: vmv1r.v v0, v8 ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret @@ -899,7 +899,7 @@ ; CHECK-RV32-NEXT: .LBB44_6: ; CHECK-RV32-NEXT: mul t1, a6, a1 ; CHECK-RV32-NEXT: add t1, a0, t1 -; CHECK-RV32-NEXT: vsetvli zero, t0, e64, m8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, t0, e64, m8, ta, ma ; CHECK-RV32-NEXT: vlse64.v v16, (t1), a1, v0.t ; CHECK-RV32-NEXT: li t0, 0 ; CHECK-RV32-NEXT: sub t1, a3, a7 @@ -916,9 +916,9 @@ ; CHECK-RV32-NEXT: .LBB44_10: ; CHECK-RV32-NEXT: mul a2, a5, a1 ; CHECK-RV32-NEXT: add a2, a0, a2 -; CHECK-RV32-NEXT: vsetvli zero, t0, e64, m8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, t0, e64, m8, ta, ma ; CHECK-RV32-NEXT: vlse64.v v24, (a2), a1, v0.t -; CHECK-RV32-NEXT: vsetvli zero, a6, e64, m8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a6, e64, m8, ta, ma ; CHECK-RV32-NEXT: vmv1r.v v0, v8 ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: vs1r.v v24, (a4) @@ -950,7 +950,7 @@ ; CHECK-RV64-NEXT: .LBB44_6: ; CHECK-RV64-NEXT: mul t1, a6, a1 ; CHECK-RV64-NEXT: add t1, a0, t1 -; CHECK-RV64-NEXT: vsetvli zero, t0, e64, m8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, t0, e64, m8, ta, ma ; CHECK-RV64-NEXT: vlse64.v v16, (t1), a1, v0.t ; CHECK-RV64-NEXT: li t0, 0 ; CHECK-RV64-NEXT: sub t1, a2, a7 @@ -967,9 +967,9 @@ ; CHECK-RV64-NEXT: .LBB44_10: ; CHECK-RV64-NEXT: mul a2, a5, a1 ; CHECK-RV64-NEXT: add a2, a0, a2 -; CHECK-RV64-NEXT: vsetvli zero, t0, e64, m8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, t0, e64, m8, ta, ma ; CHECK-RV64-NEXT: vlse64.v v24, (a2), a1, v0.t -; CHECK-RV64-NEXT: vsetvli zero, a6, e64, m8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a6, e64, m8, ta, ma ; CHECK-RV64-NEXT: vmv1r.v v0, v8 ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: vs1r.v v24, (a3) diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll @@ -9,7 +9,7 @@ define @vadd_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_nxv8i7: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i7 %b, i32 0 @@ -23,7 +23,7 @@ define @vadd_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.add.nxv1i8( %va, %b, %m, i32 %evl) @@ -45,7 +45,7 @@ define @vadd_vx_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 
@@ -57,7 +57,7 @@
define @vadd_vx_nxv1i8_commute( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv1i8_commute:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 %b, i32 0
@@ -83,7 +83,7 @@
define @vadd_vi_nxv1i8( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 -1, i32 0
@@ -111,7 +111,7 @@
define @vadd_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.add.nxv2i8( %va, %b, %m, i32 %evl)
@@ -133,7 +133,7 @@
define @vadd_vx_nxv2i8( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 %b, i32 0
@@ -159,7 +159,7 @@
define @vadd_vi_nxv2i8( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 -1, i32 0
@@ -187,7 +187,7 @@
define @vadd_vv_nxv3i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv3i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.add.nxv3i8( %va, %b, %m, i32 %evl)
@@ -209,7 +209,7 @@
define @vadd_vx_nxv3i8( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv3i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 %b, i32 0
@@ -235,7 +235,7 @@
define @vadd_vi_nxv3i8( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv3i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 -1, i32 0
@@ -263,7 +263,7 @@
define @vadd_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.add.nxv4i8( %va, %b, %m, i32 %evl)
@@ -285,7 +285,7 @@
define @vadd_vx_nxv4i8( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 %b, i32 0
@@ -311,7 +311,7 @@
define @vadd_vi_nxv4i8( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 -1, i32 0
@@ -339,7 +339,7 @@
define @vadd_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.add.nxv8i8( %va, %b, %m, i32 %evl)
@@ -361,7 +361,7 @@
define @vadd_vx_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 %b, i32 0
@@ -387,7 +387,7 @@
define @vadd_vi_nxv8i8( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 -1, i32 0
@@ -415,7 +415,7 @@
define @vadd_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.add.nxv16i8( %va, %b, %m, i32 %evl)
@@ -437,7 +437,7 @@
define @vadd_vx_nxv16i8( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 %b, i32 0
@@ -463,7 +463,7 @@
define @vadd_vi_nxv16i8( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 -1, i32 0
@@ -491,7 +491,7 @@
define @vadd_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.add.nxv32i8( %va, %b, %m, i32 %evl)
@@ -513,7 +513,7 @@
define @vadd_vx_nxv32i8( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 %b, i32 0
@@ -539,7 +539,7 @@
define @vadd_vi_nxv32i8( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 -1, i32 0
@@ -567,7 +567,7 @@
define @vadd_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.add.nxv64i8( %va, %b, %m, i32 %evl)
@@ -589,7 +589,7 @@
define @vadd_vx_nxv64i8( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 %b, i32 0
@@ -615,7 +615,7 @@
define @vadd_vi_nxv64i8( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 -1, i32 0
@@ -655,14 +655,14 @@
; CHECK-NEXT: li a4, 0
; CHECK-NEXT: vsetvli a5, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT: sub a0, a1, a2
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: bltu a1, a0, .LBB50_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a4, a0
; CHECK-NEXT: .LBB50_4:
-; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vadd.vi v16, v16, -1, v0.t
; CHECK-NEXT: ret
@@ -706,7 +706,7 @@
define @vadd_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.add.nxv1i16( %va, %b, %m, i32 %evl)
@@ -728,7 +728,7 @@
define @vadd_vx_nxv1i16( %va, i16 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 %b, i32 0
@@ -754,7 +754,7 @@
define @vadd_vi_nxv1i16( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 -1, i32 0
@@ -782,7 +782,7 @@
define @vadd_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.add.nxv2i16( %va, %b, %m, i32 %evl)
@@ -804,7 +804,7 @@
define @vadd_vx_nxv2i16( %va, i16 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 %b, i32 0
@@ -830,7 +830,7 @@
define @vadd_vi_nxv2i16( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 -1, i32 0
@@ -858,7 +858,7 @@
define @vadd_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.add.nxv4i16( %va, %b, %m, i32 %evl)
@@ -880,7 +880,7 @@
define @vadd_vx_nxv4i16( %va, i16 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 %b, i32 0
@@ -906,7 +906,7 @@
define @vadd_vi_nxv4i16( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 -1, i32 0
@@ -934,7 +934,7 @@
define @vadd_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.add.nxv8i16( %va, %b, %m, i32 %evl)
@@ -956,7 +956,7 @@
define @vadd_vx_nxv8i16( %va, i16 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 %b, i32 0
@@ -982,7 +982,7 @@
define @vadd_vi_nxv8i16( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 -1, i32 0
@@ -1010,7 +1010,7 @@
define @vadd_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.add.nxv16i16( %va, %b, %m, i32 %evl)
@@ -1032,7 +1032,7 @@
define @vadd_vx_nxv16i16( %va, i16 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 %b, i32 0
@@ -1058,7 +1058,7 @@
define @vadd_vi_nxv16i16( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 -1, i32 0
@@ -1086,7 +1086,7 @@
define @vadd_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.add.nxv32i16( %va, %b, %m, i32 %evl)
@@ -1108,7 +1108,7 @@
define @vadd_vx_nxv32i16( %va, i16 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 %b, i32 0
@@ -1134,7 +1134,7 @@
define @vadd_vi_nxv32i16( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 -1, i32 0
@@ -1162,7 +1162,7 @@
define @vadd_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.add.nxv1i32( %va, %b, %m, i32 %evl)
@@ -1184,7 +1184,7 @@
define @vadd_vx_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 %b, i32 0
@@ -1210,7 +1210,7 @@
define @vadd_vi_nxv1i32( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 -1, i32 0
@@ -1238,7 +1238,7 @@
define @vadd_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.add.nxv2i32( %va, %b, %m, i32 %evl)
@@ -1260,7 +1260,7 @@
define @vadd_vx_nxv2i32( %va, i32 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 %b, i32 0
@@ -1286,7 +1286,7 @@
define @vadd_vi_nxv2i32( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 -1, i32 0
@@ -1314,7 +1314,7 @@
define @vadd_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.add.nxv4i32( %va, %b, %m, i32 %evl)
@@ -1336,7 +1336,7 @@
define @vadd_vx_nxv4i32( %va, i32 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 %b, i32 0
@@ -1362,7 +1362,7 @@
define @vadd_vi_nxv4i32( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 -1, i32 0
@@ -1390,7 +1390,7 @@
define @vadd_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.add.nxv8i32( %va, %b, %m, i32 %evl)
@@ -1412,7 +1412,7 @@
define @vadd_vx_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 %b, i32 0
@@ -1438,7 +1438,7 @@
define @vadd_vi_nxv8i32( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 -1, i32 0
@@ -1466,7 +1466,7 @@
define @vadd_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.add.nxv16i32( %va, %b, %m, i32 %evl)
@@ -1488,7 +1488,7 @@
define @vadd_vx_nxv16i32( %va, i32 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 %b, i32 0
@@ -1514,7 +1514,7 @@
define @vadd_vi_nxv16i32( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 -1, i32 0
@@ -1556,13 +1556,13 @@
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a3
; CHECK-NEXT: .LBB118_2:
-; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT: vadd.vi v16, v16, -1, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB118_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB118_4:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
@@ -1623,13 +1623,13 @@
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a3
; CHECK-NEXT: .LBB120_2:
-; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT: vadd.vi v16, v16, -1, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB120_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB120_4:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
@@ -1654,7 +1654,7 @@
; RV32: # %bb.0:
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; RV32-NEXT: vadd.vi v8, v8, -1, v0.t
; RV32-NEXT: ret
;
@@ -1665,9 +1665,9 @@
; RV64-NEXT: vsetvli a2, zero, e8, mf2, ta, ma
; RV64-NEXT: vslidedown.vx v24, v0, a1
; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; RV64-NEXT: vadd.vi v8, v8, -1, v0.t
-; RV64-NEXT: vsetivli zero, 0, e32, m8, ta, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
; RV64-NEXT: vadd.vi v16, v16, -1, v0.t
; RV64-NEXT: ret
@@ -1684,7 +1684,7 @@
define @vadd_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.add.nxv1i64( %va, %b, %m, i32 %evl)
@@ -1713,14 +1713,14 @@
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vadd.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vadd_vx_nxv1i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vadd.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement poison, i64 %b, i32 0
@@ -1760,7 +1760,7 @@
define @vadd_vi_nxv1i64( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i64 -1, i32 0
@@ -1788,7 +1788,7 @@
define @vadd_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.add.nxv2i64( %va, %b, %m, i32 %evl)
@@ -1817,14 +1817,14 @@
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vadd.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vadd_vx_nxv2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vadd.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement poison, i64 %b, i32 0
@@ -1864,7 +1864,7 @@
define @vadd_vi_nxv2i64( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i64 -1, i32 0
@@ -1892,7 +1892,7 @@
define @vadd_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.add.nxv4i64( %va, %b, %m, i32 %evl)
@@ -1921,14 +1921,14 @@
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vadd.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vadd_vx_nxv4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vadd.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement poison, i64 %b, i32 0
@@ -1968,7 +1968,7 @@
define @vadd_vi_nxv4i64( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i64 -1, i32 0
@@ -1996,7 +1996,7 @@
define @vadd_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.add.nxv8i64( %va, %b, %m, i32 %evl)
@@ -2025,14 +2025,14 @@
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vadd_vx_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vadd.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement poison, i64 %b, i32 0
@@ -2072,7 +2072,7 @@
define @vadd_vi_nxv8i64( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i64 -1, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll
@@ -9,7 +9,7 @@
define @vand_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv8i7:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i7 %b, i32 0
@@ -23,7 +23,7 @@
define @vand_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.and.nxv1i8( %va, %b, %m, i32 %evl)
@@ -45,7 +45,7 @@
define @vand_vx_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 %b, i32 0
@@ -71,7 +71,7 @@
define @vand_vi_nxv1i8( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 4, i32 0
@@ -99,7 +99,7 @@
define @vand_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.and.nxv2i8( %va, %b, %m, i32 %evl)
@@ -121,7 +121,7 @@
define @vand_vx_nxv2i8( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 %b, i32 0
@@ -147,7 +147,7 @@
define @vand_vi_nxv2i8( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 4, i32 0
@@ -175,7 +175,7 @@
define @vand_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.and.nxv4i8( %va, %b, %m, i32 %evl)
@@ -197,7 +197,7 @@
define @vand_vx_nxv4i8( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 %b, i32 0
@@ -223,7 +223,7 @@
define @vand_vi_nxv4i8( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 4, i32 0
@@ -251,7 +251,7 @@
define @vand_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.and.nxv8i8( %va, %b, %m, i32 %evl)
@@ -273,7 +273,7 @@
define @vand_vx_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 %b, i32 0
@@ -299,7 +299,7 @@
define @vand_vi_nxv8i8( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 4, i32 0
@@ -327,7 +327,7 @@
define @vand_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.and.nxv16i8( %va, %b, %m, i32 %evl)
@@ -349,7 +349,7 @@
define @vand_vx_nxv16i8( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 %b, i32 0
@@ -375,7 +375,7 @@
define @vand_vi_nxv16i8( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 4, i32 0
@@ -403,7 +403,7 @@
define @vand_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.and.nxv32i8( %va, %b, %m, i32 %evl)
@@ -425,7 +425,7 @@
define @vand_vx_nxv32i8( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 %b, i32 0
@@ -451,7 +451,7 @@
define @vand_vi_nxv32i8( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 4, i32 0
@@ -479,7 +479,7 @@
define @vand_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.and.nxv64i8( %va, %b, %m, i32 %evl)
@@ -501,7 +501,7 @@
define @vand_vx_nxv64i8( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 %b, i32 0
@@ -527,7 +527,7 @@
define @vand_vi_nxv64i8( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 4, i32 0
@@ -555,7 +555,7 @@
define @vand_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.and.nxv1i16( %va, %b, %m, i32 %evl)
@@ -577,7 +577,7 @@
define @vand_vx_nxv1i16( %va, i16 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 %b, i32 0
@@ -603,7 +603,7 @@
define @vand_vi_nxv1i16( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 4, i32 0
@@ -631,7 +631,7 @@
define @vand_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.and.nxv2i16( %va, %b, %m, i32 %evl)
@@ -653,7 +653,7 @@
define @vand_vx_nxv2i16( %va, i16 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 %b, i32 0
@@ -679,7 +679,7 @@
define @vand_vi_nxv2i16( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 4, i32 0
@@ -707,7 +707,7 @@
define @vand_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.and.nxv4i16( %va, %b, %m, i32 %evl)
@@ -729,7 +729,7 @@
define @vand_vx_nxv4i16( %va, i16 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 %b, i32 0
@@ -755,7 +755,7 @@
define @vand_vi_nxv4i16( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 4, i32 0
@@ -783,7 +783,7 @@
define @vand_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.and.nxv8i16( %va, %b, %m, i32 %evl)
@@ -805,7 +805,7 @@
define @vand_vx_nxv8i16( %va, i16 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 %b, i32 0
@@ -831,7 +831,7 @@
define @vand_vi_nxv8i16( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 4, i32 0
@@ -859,7 +859,7 @@
define @vand_vv_nxv14i16( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv14i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.and.nxv14i16( %va, %b, %m, i32 %evl)
@@ -881,7 +881,7 @@
define @vand_vx_nxv14i16( %va, i16 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv14i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 %b, i32 0
@@ -907,7 +907,7 @@
define @vand_vi_nxv14i16( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv14i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 4, i32 0
@@ -935,7 +935,7 @@
define @vand_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.and.nxv16i16( %va, %b, %m, i32 %evl)
@@ -957,7 +957,7 @@
define @vand_vx_nxv16i16( %va, i16 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 %b, i32 0
@@ -983,7 +983,7 @@
define @vand_vi_nxv16i16( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 4, i32 0
@@ -1011,7 +1011,7 @@
define @vand_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.and.nxv32i16( %va, %b, %m, i32 %evl)
@@ -1033,7 +1033,7 @@
define @vand_vx_nxv32i16( %va, i16 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 %b, i32 0
@@ -1045,7 +1045,7 @@
define @vand_vx_nxv32i16_commute( %va, i16 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv32i16_commute:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 %b, i32 0
@@ -1071,7 +1071,7 @@
define @vand_vi_nxv32i16( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 4, i32 0
@@ -1099,7 +1099,7 @@
define @vand_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.and.nxv1i32( %va, %b, %m, i32 %evl)
@@ -1121,7 +1121,7 @@
define @vand_vx_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 %b, i32 0
@@ -1147,7 +1147,7 @@
define @vand_vi_nxv1i32( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 4, i32 0
@@ -1175,7 +1175,7 @@
define @vand_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.and.nxv2i32( %va, %b, %m, i32 %evl)
@@ -1197,7 +1197,7 @@
define @vand_vx_nxv2i32( %va, i32 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 %b, i32 0
@@ -1223,7 +1223,7 @@
define @vand_vi_nxv2i32( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 4, i32 0 @@ -1251,7 +1251,7 @@ define @vand_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv4i32( %va, %b, %m, i32 %evl) @@ -1273,7 +1273,7 @@ define @vand_vx_nxv4i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1299,7 +1299,7 @@ define @vand_vi_nxv4i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 4, i32 0 @@ -1327,7 +1327,7 @@ define @vand_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv8i32( %va, %b, %m, i32 %evl) @@ -1349,7 +1349,7 @@ define @vand_vx_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1375,7 +1375,7 @@ define @vand_vi_nxv8i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 4, i32 0 @@ -1403,7 +1403,7 @@ define @vand_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv16i32( %va, %b, %m, i32 %evl) @@ -1425,7 +1425,7 @@ define @vand_vx_nxv16i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1451,7 +1451,7 @@ define @vand_vi_nxv16i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 4, i32 0 @@ -1479,7 +1479,7 @@ define @vand_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv1i64( %va, %b, %m, i32 %evl) @@ -1508,14 
+1508,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vand.vv v8, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vand.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1555,7 +1555,7 @@ define @vand_vi_nxv1i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 4, i32 0 @@ -1583,7 +1583,7 @@ define @vand_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv2i64( %va, %b, %m, i32 %evl) @@ -1612,14 +1612,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vand.vv v8, v8, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vand.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1659,7 +1659,7 @@ define @vand_vi_nxv2i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 4, i32 0 @@ -1687,7 +1687,7 @@ define @vand_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv4i64( %va, %b, %m, i32 %evl) @@ -1716,14 +1716,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vand.vv v8, v8, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vand.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1763,7 +1763,7 @@ define @vand_vi_nxv4i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 4, i32 0 @@ -1791,7 +1791,7 @@ define @vand_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli 
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vand.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.and.nxv8i64( %va, %b, %m, i32 %evl)
@@ -1820,14 +1820,14 @@
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
 ; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT: vand.vv v8, v8, v16, v0.t
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vand_vx_nxv8i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
 ; RV64-NEXT: vand.vx v8, v8, a0, v0.t
 ; RV64-NEXT: ret
 %elt.head = insertelement poison, i64 %b, i32 0
@@ -1867,7 +1867,7 @@
 define @vand_vi_nxv8i64( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vand_vi_nxv8i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i64 4, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll
@@ -9,7 +9,7 @@
 define @vfsgnj_vv_nxv1f16( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsgnj_vv_nxv1f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vfsgnj.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.copysign.nxv1f16( %va, %vb, %m, i32 %evl)
@@ -33,7 +33,7 @@
 define @vfsgnj_vv_nxv2f16( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsgnj_vv_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfsgnj.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.copysign.nxv2f16( %va, %vb, %m, i32 %evl)
@@ -57,7 +57,7 @@
 define @vfsgnj_vv_nxv4f16( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsgnj_vv_nxv4f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vfsgnj.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.copysign.nxv4f16( %va, %vb, %m, i32 %evl)
@@ -81,7 +81,7 @@
 define @vfsgnj_vv_nxv8f16( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsgnj_vv_nxv8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vfsgnj.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.copysign.nxv8f16( %va, %vb, %m, i32 %evl)
@@ -105,7 +105,7 @@
 define @vfsgnj_vv_nxv16f16( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsgnj_vv_nxv16f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: vfsgnj.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.copysign.nxv16f16( %va, %vb, %m, i32 %evl)
@@ -129,7 +129,7 @@
 define @vfsgnj_vv_nxv32f16( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsgnj_vv_nxv32f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT: vfsgnj.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.copysign.nxv32f16( %va, %vb, %m, i32 %evl)
@@ -153,7 +153,7 @@
 define @vfsgnj_vv_nxv1f32( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsgnj_vv_nxv1f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vfsgnj.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.copysign.nxv1f32( %va, %vb, %m, i32 %evl)
@@ -177,7 +177,7 @@
 define @vfsgnj_vv_nxv2f32( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsgnj_vv_nxv2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfsgnj.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.copysign.nxv2f32( %va, %vb, %m, i32 %evl)
@@ -201,7 +201,7 @@
 define @vfsgnj_vv_nxv4f32( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsgnj_vv_nxv4f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vfsgnj.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.copysign.nxv4f32( %va, %vb, %m, i32 %evl)
@@ -225,7 +225,7 @@
 define @vfsgnj_vv_nxv8f32( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsgnj_vv_nxv8f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vfsgnj.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.copysign.nxv8f32( %va, %vb, %m, i32 %evl)
@@ -249,7 +249,7 @@
 define @vfsgnj_vv_nxv16f32( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsgnj_vv_nxv16f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT: vfsgnj.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.copysign.nxv16f32( %va, %vb, %m, i32 %evl)
@@ -273,7 +273,7 @@
 define @vfsgnj_vv_nxv1f64( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsgnj_vv_nxv1f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vfsgnj.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.copysign.nxv1f64( %va, %vb, %m, i32 %evl)
@@ -297,7 +297,7 @@
 define @vfsgnj_vv_nxv2f64( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsgnj_vv_nxv2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vfsgnj.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.copysign.nxv2f64( %va, %vb, %m, i32 %evl)
@@ -321,7 +321,7 @@
 define @vfsgnj_vv_nxv4f64( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsgnj_vv_nxv4f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vfsgnj.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.copysign.nxv4f64( %va, %vb, %m, i32 %evl)
@@ -345,7 +345,7 @@
 define @vfsgnj_vv_nxv8f64( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsgnj_vv_nxv8f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vfsgnj.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.copysign.nxv8f64( %va, %vb, %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll
@@ -15,7 +15,7 @@
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: vadd.vv v9, v9, v9
 ; CHECK-NEXT: vsra.vi v9, v9, 1
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i7 %b, i32 0
@@ -29,7 +29,7 @@
 define @vdiv_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vv_nxv1i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sdiv.nxv1i8( %va, %b, %m, i32 %evl)
@@ -51,7 +51,7 @@
 define @vdiv_vx_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vx_nxv1i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i8 %b, i32 0
@@ -79,7 +79,7 @@
 define @vdiv_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vv_nxv2i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sdiv.nxv2i8( %va, %b, %m, i32 %evl)
@@ -101,7 +101,7 @@
 define @vdiv_vx_nxv2i8( %va, i8 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vx_nxv2i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i8 %b, i32 0
@@ -129,7 +129,7 @@
 define @vdiv_vv_nxv3i8( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vv_nxv3i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sdiv.nxv3i8( %va, %b, %m, i32 %evl)
@@ -141,7 +141,7 @@
 define @vdiv_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vv_nxv4i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sdiv.nxv4i8( %va, %b, %m, i32 %evl)
@@ -163,7 +163,7 @@
 define @vdiv_vx_nxv4i8( %va, i8 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vx_nxv4i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i8 %b, i32 0
@@ -191,7 +191,7 @@
 define @vdiv_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vv_nxv8i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sdiv.nxv8i8( %va, %b, %m, i32 %evl)
@@ -213,7 +213,7 @@
 define @vdiv_vx_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vx_nxv8i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i8 %b, i32 0
@@ -241,7 +241,7 @@
 define @vdiv_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vv_nxv16i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT: vdiv.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sdiv.nxv16i8( %va, %b, %m, i32 %evl)
@@ -263,7 +263,7 @@
 define @vdiv_vx_nxv16i8( %va, i8 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vx_nxv16i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i8 %b, i32 0
@@ -291,7 +291,7 @@
 define @vdiv_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vv_nxv32i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT: vdiv.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sdiv.nxv32i8( %va, %b, %m, i32 %evl)
@@ -313,7 +313,7 @@
 define @vdiv_vx_nxv32i8( %va, i8 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vx_nxv32i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i8 %b, i32 0
@@ -341,7 +341,7 @@
 define @vdiv_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vv_nxv64i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT: vdiv.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sdiv.nxv64i8( %va, %b, %m, i32 %evl)
@@ -363,7 +363,7 @@
 define @vdiv_vx_nxv64i8( %va, i8 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vx_nxv64i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i8 %b, i32 0
@@ -391,7 +391,7 @@
 define @vdiv_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vv_nxv1i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sdiv.nxv1i16( %va, %b, %m, i32 %evl)
@@ -413,7 +413,7 @@
 define @vdiv_vx_nxv1i16( %va, i16 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vx_nxv1i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i16 %b, i32 0
@@ -441,7 +441,7 @@
 define @vdiv_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vv_nxv2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sdiv.nxv2i16( %va, %b, %m, i32 %evl)
@@ -463,7 +463,7 @@
 define @vdiv_vx_nxv2i16( %va, i16 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vx_nxv2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i16 %b, i32 0
@@ -491,7 +491,7 @@
 define @vdiv_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vv_nxv4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sdiv.nxv4i16( %va, %b, %m, i32 %evl)
@@ -513,7 +513,7 @@
 define @vdiv_vx_nxv4i16( %va, i16 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vx_nxv4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i16 %b, i32 0
@@ -541,7 +541,7 @@
 define @vdiv_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vv_nxv8i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vdiv.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sdiv.nxv8i16( %va, %b, %m, i32 %evl)
@@ -563,7 +563,7 @@
 define @vdiv_vx_nxv8i16( %va, i16 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vx_nxv8i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i16 %b, i32 0
@@ -591,7 +591,7 @@
 define @vdiv_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vv_nxv16i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: vdiv.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sdiv.nxv16i16( %va, %b, %m, i32 %evl)
@@ -613,7 +613,7 @@
 define @vdiv_vx_nxv16i16( %va, i16 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vx_nxv16i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i16 %b, i32 0
@@ -641,7 +641,7 @@
 define @vdiv_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vv_nxv32i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT: vdiv.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sdiv.nxv32i16( %va, %b, %m, i32 %evl)
@@ -663,7 +663,7 @@
 define @vdiv_vx_nxv32i16( %va, i16 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vx_nxv32i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i16 %b, i32 0
@@ -691,7 +691,7 @@
 define @vdiv_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vv_nxv1i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sdiv.nxv1i32( %va, %b, %m, i32 %evl)
@@ -713,7 +713,7 @@
 define @vdiv_vx_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vx_nxv1i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i32 %b, i32 0
@@ -741,7 +741,7 @@
 define @vdiv_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vv_nxv2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sdiv.nxv2i32( %va, %b, %m, i32 %evl)
@@ -763,7 +763,7 @@
 define @vdiv_vx_nxv2i32( %va, i32 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vx_nxv2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i32 %b, i32 0
@@ -791,7 +791,7 @@
 define @vdiv_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vv_nxv4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vdiv.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sdiv.nxv4i32( %va, %b, %m, i32 %evl)
@@ -813,7 +813,7 @@
 define @vdiv_vx_nxv4i32( %va, i32 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vx_nxv4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i32 %b, i32 0
@@ -841,7 +841,7 @@
 define @vdiv_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vv_nxv8i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vdiv.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sdiv.nxv8i32( %va, %b, %m, i32 %evl)
@@ -863,7 +863,7 @@
 define @vdiv_vx_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vx_nxv8i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i32 %b, i32 0
@@ -891,7 +891,7 @@
 define @vdiv_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vv_nxv16i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT: vdiv.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sdiv.nxv16i32( %va, %b, %m, i32 %evl)
@@ -913,7 +913,7 @@
 define @vdiv_vx_nxv16i32( %va, i32 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vx_nxv16i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i32 %b, i32 0
@@ -941,7 +941,7 @@
 define @vdiv_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vv_nxv1i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sdiv.nxv1i64( %va, %b, %m, i32 %evl)
@@ -970,14 +970,14 @@
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
 ; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT: vdiv.vv v8, v8, v9, v0.t
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vdiv_vx_nxv1i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT: vdiv.vx v8, v8, a0, v0.t
 ; RV64-NEXT: ret
 %elt.head = insertelement poison, i64 %b, i32 0
@@ -1019,7 +1019,7 @@
 define @vdiv_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vv_nxv2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vdiv.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sdiv.nxv2i64( %va, %b, %m, i32 %evl)
@@ -1048,14 +1048,14 @@
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
 ; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT: vdiv.vv v8, v8, v10, v0.t
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vdiv_vx_nxv2i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT: vdiv.vx v8, v8, a0, v0.t
 ; RV64-NEXT: ret
 %elt.head = insertelement poison, i64 %b, i32 0
@@ -1097,7 +1097,7 @@
 define @vdiv_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vv_nxv4i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vdiv.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sdiv.nxv4i64( %va, %b, %m, i32 %evl)
@@ -1126,14 +1126,14 @@
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
 ; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT: vdiv.vv v8, v8, v12, v0.t
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vdiv_vx_nxv4i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT: vdiv.vx v8, v8, a0, v0.t
 ; RV64-NEXT: ret
 %elt.head = insertelement poison, i64 %b, i32 0
@@ -1175,7 +1175,7 @@
 define @vdiv_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdiv_vv_nxv8i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vdiv.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sdiv.nxv8i64( %va, %b, %m, i32 %evl)
@@ -1204,14 +1204,14 @@
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
 ; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT: vdiv.vv v8, v8, v16, v0.t
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vdiv_vx_nxv8i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
 ; RV64-NEXT: vdiv.vx v8, v8, a0, v0.t
 ; RV64-NEXT: ret
 %elt.head = insertelement poison, i64 %b, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll
@@ -14,7 +14,7 @@
 ; CHECK-NEXT: vand.vx v8, v8, a2
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: vand.vx v9, v9, a2
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i7 %b, i32 0
@@ -28,7 +28,7 @@
 define @vdivu_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vv_nxv1i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.udiv.nxv1i8( %va, %b, %m, i32 %evl)
@@ -50,7 +50,7 @@
 define @vdivu_vx_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vx_nxv1i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i8 %b, i32 0
@@ -78,7 +78,7 @@
 define @vdivu_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vv_nxv2i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.udiv.nxv2i8( %va, %b, %m, i32 %evl)
@@ -100,7 +100,7 @@
 define @vdivu_vx_nxv2i8( %va, i8 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vx_nxv2i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i8 %b, i32 0
@@ -128,7 +128,7 @@
 define @vdivu_vv_nxv3i8( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vv_nxv3i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.udiv.nxv3i8( %va, %b, %m, i32 %evl)
@@ -140,7 +140,7 @@
 define @vdivu_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vv_nxv4i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.udiv.nxv4i8( %va, %b, %m, i32 %evl)
@@ -162,7 +162,7 @@
 define @vdivu_vx_nxv4i8( %va, i8 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vx_nxv4i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i8 %b, i32 0
@@ -190,7 +190,7 @@
 define @vdivu_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vv_nxv8i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.udiv.nxv8i8( %va, %b, %m, i32 %evl)
@@ -212,7 +212,7 @@
 define @vdivu_vx_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vx_nxv8i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i8 %b, i32 0
@@ -240,7 +240,7 @@
 define @vdivu_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vv_nxv16i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT: vdivu.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.udiv.nxv16i8( %va, %b, %m, i32 %evl)
@@ -262,7 +262,7 @@
 define @vdivu_vx_nxv16i8( %va, i8 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vx_nxv16i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i8 %b, i32 0
@@ -290,7 +290,7 @@
 define @vdivu_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vv_nxv32i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT: vdivu.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.udiv.nxv32i8( %va, %b, %m, i32 %evl)
@@ -312,7 +312,7 @@
 define @vdivu_vx_nxv32i8( %va, i8 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vx_nxv32i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i8 %b, i32 0
@@ -340,7 +340,7 @@
 define @vdivu_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vv_nxv64i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT: vdivu.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.udiv.nxv64i8( %va, %b, %m, i32 %evl)
@@ -362,7 +362,7 @@
 define @vdivu_vx_nxv64i8( %va, i8 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vx_nxv64i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i8 %b, i32 0
@@ -390,7 +390,7 @@
 define @vdivu_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vv_nxv1i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.udiv.nxv1i16( %va, %b, %m, i32 %evl)
@@ -412,7 +412,7 @@
 define @vdivu_vx_nxv1i16( %va, i16 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vx_nxv1i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i16 %b, i32 0
@@ -440,7 +440,7 @@
 define @vdivu_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vv_nxv2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.udiv.nxv2i16( %va, %b, %m, i32 %evl)
@@ -462,7 +462,7 @@
 define @vdivu_vx_nxv2i16( %va, i16 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vx_nxv2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i16 %b, i32 0
@@ -490,7 +490,7 @@
 define @vdivu_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vv_nxv4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.udiv.nxv4i16( %va, %b, %m, i32 %evl)
@@ -512,7 +512,7 @@
 define @vdivu_vx_nxv4i16( %va, i16 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vx_nxv4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i16 %b, i32 0
@@ -540,7 +540,7 @@
 define @vdivu_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vv_nxv8i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vdivu.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.udiv.nxv8i16( %va, %b, %m, i32 %evl)
@@ -562,7 +562,7 @@
 define @vdivu_vx_nxv8i16( %va, i16 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vx_nxv8i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i16 %b, i32 0
@@ -590,7 +590,7 @@
 define @vdivu_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vv_nxv16i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: vdivu.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.udiv.nxv16i16( %va, %b, %m, i32 %evl)
@@ -612,7 +612,7 @@
 define @vdivu_vx_nxv16i16( %va, i16 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vx_nxv16i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i16 %b, i32 0
@@ -640,7 +640,7 @@
 define @vdivu_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vv_nxv32i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT: vdivu.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.udiv.nxv32i16( %va, %b, %m, i32 %evl)
@@ -662,7 +662,7 @@
 define @vdivu_vx_nxv32i16( %va, i16 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vx_nxv32i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i16 %b, i32 0
@@ -690,7 +690,7 @@
 define @vdivu_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vv_nxv1i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.udiv.nxv1i32( %va, %b, %m, i32 %evl)
@@ -712,7 +712,7 @@
 define @vdivu_vx_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vx_nxv1i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i32 %b, i32 0
@@ -740,7 +740,7 @@
 define @vdivu_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vv_nxv2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.udiv.nxv2i32( %va, %b, %m, i32 %evl)
@@ -762,7 +762,7 @@
 define @vdivu_vx_nxv2i32( %va, i32 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vx_nxv2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i32 %b, i32 0
@@ -790,7 +790,7 @@
 define @vdivu_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vv_nxv4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vdivu.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.udiv.nxv4i32( %va, %b, %m, i32 %evl)
@@ -812,7 +812,7 @@
 define @vdivu_vx_nxv4i32( %va, i32 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vx_nxv4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i32 %b, i32 0
@@ -840,7 +840,7 @@
 define @vdivu_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vv_nxv8i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vdivu.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.udiv.nxv8i32( %va, %b, %m, i32 %evl)
@@ -862,7 +862,7 @@
 define @vdivu_vx_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vx_nxv8i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i32 %b, i32 0
@@ -890,7 +890,7 @@
 define @vdivu_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vv_nxv16i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT: vdivu.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.udiv.nxv16i32( %va, %b, %m, i32 %evl)
@@ -912,7 +912,7 @@
 define @vdivu_vx_nxv16i32( %va, i32 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vx_nxv16i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i32 %b, i32 0
@@ -940,7 +940,7 @@
 define @vdivu_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vv_nxv1i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.udiv.nxv1i64( %va, %b, %m, i32 %evl)
@@ -969,14 +969,14 @@
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
 ; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT: vdivu.vv v8, v8, v9, v0.t
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vdivu_vx_nxv1i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT: vdivu.vx v8, v8, a0, v0.t
 ; RV64-NEXT: ret
 %elt.head = insertelement poison, i64 %b, i32 0
@@ -1018,7 +1018,7 @@
 define @vdivu_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vv_nxv2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vdivu.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.udiv.nxv2i64( %va, %b, %m, i32 %evl)
@@ -1047,14 +1047,14 @@
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
 ; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT: vdivu.vv v8, v8, v10, v0.t
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vdivu_vx_nxv2i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT: vdivu.vx v8, v8, a0, v0.t
 ; RV64-NEXT: ret
 %elt.head = insertelement poison, i64 %b, i32 0
@@ -1096,7 +1096,7 @@
 define @vdivu_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vv_nxv4i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vdivu.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.udiv.nxv4i64( %va, %b, %m, i32 %evl)
@@ -1125,14 +1125,14 @@
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
 ; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT: vdivu.vv v8, v8, v12, v0.t
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vdivu_vx_nxv4i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT: vdivu.vx v8, v8, a0, v0.t
 ; RV64-NEXT: ret
 %elt.head = insertelement poison, i64 %b, i32 0
@@ -1174,7 +1174,7 @@
 define @vdivu_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vv_nxv8i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vdivu.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.udiv.nxv8i64( %va, %b, %m, i32 %evl)
@@ -1203,14 +1203,14 @@
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
 ; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT: vdivu.vv v8, v8, v16, v0.t
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vdivu_vx_nxv8i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
 ; RV64-NEXT: vdivu.vx v8, v8, a0, v0.t
 ; RV64-NEXT: ret
 %elt.head = insertelement poison, i64 %b, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll
@@ -9,7 +9,7 @@
 define @vfabs_vv_nxv1f16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfabs_vv_nxv1f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vfabs.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fabs.nxv1f16( %va, %m, i32 %evl)
@@ -33,7 +33,7 @@
 define @vfabs_vv_nxv2f16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfabs_vv_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfabs.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fabs.nxv2f16( %va, %m, i32 %evl)
@@ -57,7 +57,7 @@
 define @vfabs_vv_nxv4f16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfabs_vv_nxv4f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vfabs.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fabs.nxv4f16( %va, %m, i32 %evl)
@@ -81,7 +81,7 @@
 define @vfabs_vv_nxv8f16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfabs_vv_nxv8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vfabs.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fabs.nxv8f16( %va, %m, i32 %evl)
@@ -105,7 +105,7 @@
 define @vfabs_vv_nxv16f16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfabs_vv_nxv16f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: vfabs.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fabs.nxv16f16( %va, %m, i32 %evl)
@@ -129,7 +129,7 @@
 define @vfabs_vv_nxv32f16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfabs_vv_nxv32f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT: vfabs.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fabs.nxv32f16( %va, %m, i32 %evl)
@@ -153,7 +153,7 @@
 define @vfabs_vv_nxv1f32( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfabs_vv_nxv1f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vfabs.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fabs.nxv1f32( %va, %m, i32 %evl)
@@ -177,7 +177,7 @@
 define @vfabs_vv_nxv2f32( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfabs_vv_nxv2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfabs.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fabs.nxv2f32( %va, %m, i32 %evl)
@@ -201,7 +201,7 @@
 define @vfabs_vv_nxv4f32( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfabs_vv_nxv4f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vfabs.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fabs.nxv4f32( %va, %m, i32 %evl)
@@ -225,7 +225,7 @@
 define @vfabs_vv_nxv8f32( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfabs_vv_nxv8f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vfabs.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fabs.nxv8f32( %va, %m, i32 %evl)
@@ -249,7 +249,7 @@
 define @vfabs_vv_nxv16f32( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfabs_vv_nxv16f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT: vfabs.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fabs.nxv16f32( %va, %m, i32 %evl)
@@ -273,7 +273,7 @@
 define @vfabs_vv_nxv1f64( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfabs_vv_nxv1f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vfabs.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fabs.nxv1f64( %va, %m, i32 %evl)
@@ -297,7 +297,7 @@
 define @vfabs_vv_nxv2f64( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfabs_vv_nxv2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vfabs.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fabs.nxv2f64( %va, %m, i32 %evl)
@@ -321,7 +321,7 @@
 define @vfabs_vv_nxv4f64( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfabs_vv_nxv4f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vfabs.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fabs.nxv4f64( %va, %m, i32 %evl)
@@ -345,7 +345,7 @@
 define @vfabs_vv_nxv7f64( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfabs_vv_nxv7f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vfabs.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fabs.nxv7f64( %va, %m, i32 %evl)
@@ -369,7 +369,7 @@
 define @vfabs_vv_nxv8f64( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfabs_vv_nxv8f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vfabs.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fabs.nxv8f64( %va, %m, i32 %evl)
@@ -405,13 +405,13 @@
 ; CHECK-NEXT: # %bb.1:
 ; CHECK-NEXT: mv a2, a3
 ; CHECK-NEXT: .LBB32_2:
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; CHECK-NEXT: vfabs.v v16, v16, v0.t
 ; CHECK-NEXT: bltu a0, a1, .LBB32_4
 ; CHECK-NEXT: # %bb.3:
 ; CHECK-NEXT: mv a0, a1
 ; CHECK-NEXT: .LBB32_4:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v24
 ; CHECK-NEXT: vfabs.v v8, v8, v0.t
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
@@ -9,7 +9,7 @@
 define @vfadd_vv_nxv1f16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfadd_vv_nxv1f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fadd.nxv1f16( %va, %b, %m, i32 %evl)
@@ -31,7 +31,7 @@
 define @vfadd_vf_nxv1f16( %va, half %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfadd_vf_nxv1f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
@@ -43,7 +43,7 @@
 define @vfadd_vf_nxv1f16_commute( %va, half %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfadd_vf_nxv1f16_commute:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
@@ -85,7 +85,7 @@
 define @vfadd_vv_nxv2f16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfadd_vv_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fadd.nxv2f16( %va, %b, %m, i32 %evl)
@@ -107,7 +107,7 @@
 define @vfadd_vf_nxv2f16( %va, half %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfadd_vf_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
@@ -135,7 +135,7 @@
 define @vfadd_vv_nxv4f16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfadd_vv_nxv4f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fadd.nxv4f16( %va, %b, %m, i32 %evl)
@@ -157,7 +157,7 @@
 define @vfadd_vf_nxv4f16( %va, half %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfadd_vf_nxv4f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
@@ -185,7 +185,7 @@
 define @vfadd_vv_nxv8f16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfadd_vv_nxv8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fadd.nxv8f16( %va, %b, %m, i32 %evl)
@@ -207,7 +207,7 @@
 define @vfadd_vf_nxv8f16( %va, half %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfadd_vf_nxv8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
@@ -235,7 +235,7 @@
 define @vfadd_vv_nxv16f16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfadd_vv_nxv16f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: vfadd.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fadd.nxv16f16( %va, %b, %m, i32 %evl)
@@ -257,7 +257,7 @@
 define @vfadd_vf_nxv16f16( %va, half %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfadd_vf_nxv16f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
@@ -285,7 +285,7 @@
 define @vfadd_vv_nxv32f16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfadd_vv_nxv32f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fadd.nxv32f16( %va, %b, %m, i32 %evl)
@@ -307,7 +307,7 @@
 define @vfadd_vf_nxv32f16( %va, half %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfadd_vf_nxv32f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
@@ -335,7 +335,7 @@
 define @vfadd_vv_nxv1f32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfadd_vv_nxv1f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fadd.nxv1f32( %va, %b, %m, i32 %evl)
@@ -357,7 +357,7 @@
 define @vfadd_vf_nxv1f32( %va, float %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfadd_vf_nxv1f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, float %b, i32 0
@@ -385,7 +385,7 @@
 define @vfadd_vv_nxv2f32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfadd_vv_nxv2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fadd.nxv2f32( %va, %b, %m, i32 %evl)
@@ -407,7 +407,7 @@
 define @vfadd_vf_nxv2f32( %va, float %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfadd_vf_nxv2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, float %b, i32 0
@@ -435,7 +435,7 @@
 define @vfadd_vv_nxv4f32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfadd_vv_nxv4f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fadd.nxv4f32( %va, %b, %m, i32 %evl)
@@ -457,7 +457,7 @@
 define @vfadd_vf_nxv4f32( %va, float %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfadd_vf_nxv4f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, float %b, i32 0
@@ -485,7 +485,7 @@
 define @vfadd_vv_nxv8f32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfadd_vv_nxv8f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vfadd.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fadd.nxv8f32( %va, %b, %m, i32 %evl)
@@ -507,7 +507,7 @@
 define @vfadd_vf_nxv8f32( %va, float %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfadd_vf_nxv8f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, float %b, i32 0
@@ -535,7 +535,7 @@
 define @vfadd_vv_nxv16f32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfadd_vv_nxv16f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fadd.nxv16f32( %va, %b, %m, i32 %evl)
@@ -557,7 +557,7 @@
 define @vfadd_vf_nxv16f32( %va, float %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfadd_vf_nxv16f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, float %b, i32 0
@@ -585,7 +585,7 @@
 define @vfadd_vv_nxv1f64( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfadd_vv_nxv1f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fadd.nxv1f64( %va, %b, %m, i32 %evl)
@@ -607,7 +607,7 @@
 define @vfadd_vf_nxv1f64( %va, double %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfadd_vf_nxv1f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, double %b, i32 0
@@ -635,7 +635,7 @@
 define @vfadd_vv_nxv2f64( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfadd_vv_nxv2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fadd.nxv2f64( %va, %b, %m, i32 %evl)
@@ -657,7 +657,7 @@
 define @vfadd_vf_nxv2f64( %va, double %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfadd_vf_nxv2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, double %b, i32 0
@@ -685,7 +685,7 @@
 define @vfadd_vv_nxv4f64( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfadd_vv_nxv4f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vfadd.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fadd.nxv4f64( %va, %b, %m, i32 %evl)
@@ -707,7 +707,7 @@
 define @vfadd_vf_nxv4f64( %va, double %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfadd_vf_nxv4f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, double %b, i32 0
@@ -735,7 +735,7 @@
 define @vfadd_vv_nxv7f64( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfadd_vv_nxv7f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fadd.nxv7f64( %va, %b, %m, i32 %evl)
@@ -747,7 +747,7 @@
 define @vfadd_vv_nxv8f64( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfadd_vv_nxv8f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fadd.nxv8f64( %va, %b, %m, i32 %evl)
@@ -769,7 +769,7 @@
 define @vfadd_vf_nxv8f64( %va, double %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfadd_vf_nxv8f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, double %b, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
@@ -9,7 +9,7 @@
 define @vfdiv_vv_nxv1f16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfdiv_vv_nxv1f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fdiv.nxv1f16( %va, %b, %m, i32 %evl)
@@ -31,7 +31,7 @@
 define @vfdiv_vf_nxv1f16( %va, half %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfdiv_vf_nxv1f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
@@ -59,7 +59,7 @@
 define @vfdiv_vv_nxv2f16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfdiv_vv_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fdiv.nxv2f16( %va, %b, %m, i32 %evl)
@@ -81,7 +81,7 @@
 define @vfdiv_vf_nxv2f16( %va, half %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfdiv_vf_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
@@ -109,7 +109,7 @@
 define @vfdiv_vv_nxv4f16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfdiv_vv_nxv4f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fdiv.nxv4f16( %va, %b, %m, i32 %evl)
@@ -131,7 +131,7 @@
 define @vfdiv_vf_nxv4f16( %va, half %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfdiv_vf_nxv4f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
@@ -159,7 +159,7 @@
 define @vfdiv_vv_nxv8f16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfdiv_vv_nxv8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fdiv.nxv8f16( %va, %b, %m, i32 %evl)
@@ -181,7 +181,7 @@
 define @vfdiv_vf_nxv8f16( %va, half %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfdiv_vf_nxv8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
@@ -209,7 +209,7 @@
 define @vfdiv_vv_nxv16f16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfdiv_vv_nxv16f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: vfdiv.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fdiv.nxv16f16( %va, %b, %m, i32 %evl)
@@ -231,7 +231,7 @@
 define @vfdiv_vf_nxv16f16( %va, half %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfdiv_vf_nxv16f16:
 ; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
@@ -259,7 +259,7 @@
 define @vfdiv_vv_nxv32f16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfdiv_vv_nxv32f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fdiv.nxv32f16( %va, %b, %m, i32 %evl)
@@ -281,7 +281,7 @@
 define @vfdiv_vf_nxv32f16( %va, half %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfdiv_vf_nxv32f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
@@ -309,7 +309,7 @@
 define @vfdiv_vv_nxv1f32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfdiv_vv_nxv1f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fdiv.nxv1f32( %va, %b, %m, i32 %evl)
@@ -331,7 +331,7 @@
 define @vfdiv_vf_nxv1f32( %va, float %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfdiv_vf_nxv1f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, float %b, i32 0
@@ -359,7 +359,7 @@
 define @vfdiv_vv_nxv2f32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfdiv_vv_nxv2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fdiv.nxv2f32( %va, %b, %m, i32 %evl)
@@ -381,7 +381,7 @@
 define @vfdiv_vf_nxv2f32( %va, float %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfdiv_vf_nxv2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, float %b, i32 0
@@ -409,7 +409,7 @@
 define @vfdiv_vv_nxv4f32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfdiv_vv_nxv4f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fdiv.nxv4f32( %va, %b, %m, i32 %evl)
@@ -431,7 +431,7 @@
 define @vfdiv_vf_nxv4f32( %va, float %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfdiv_vf_nxv4f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, float %b, i32 0
@@ -459,7 +459,7 @@
 define @vfdiv_vv_nxv8f32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfdiv_vv_nxv8f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vfdiv.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fdiv.nxv8f32( %va, %b, %m, i32 %evl)
@@ -481,7 +481,7 @@
 define @vfdiv_vf_nxv8f32( %va, float %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfdiv_vf_nxv8f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, float %b, i32 0
@@ -509,7 +509,7 @@
 define @vfdiv_vv_nxv16f32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfdiv_vv_nxv16f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fdiv.nxv16f32( %va, %b, %m, i32 %evl)
@@ -531,7 +531,7 @@
 define @vfdiv_vf_nxv16f32( %va, float %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfdiv_vf_nxv16f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, float %b, i32 0
@@ -559,7 +559,7 @@
 define @vfdiv_vv_nxv1f64( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfdiv_vv_nxv1f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fdiv.nxv1f64( %va, %b, %m, i32 %evl)
@@ -581,7 +581,7 @@
 define @vfdiv_vf_nxv1f64( %va, double %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfdiv_vf_nxv1f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, double %b, i32 0
@@ -609,7 +609,7 @@
 define @vfdiv_vv_nxv2f64( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfdiv_vv_nxv2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fdiv.nxv2f64( %va, %b, %m, i32 %evl)
@@ -631,7 +631,7 @@
 define @vfdiv_vf_nxv2f64( %va, double %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfdiv_vf_nxv2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, double %b, i32 0
@@ -659,7 +659,7 @@
 define @vfdiv_vv_nxv4f64( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfdiv_vv_nxv4f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vfdiv.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fdiv.nxv4f64( %va, %b, %m, i32 %evl)
@@ -681,7 +681,7 @@
 define @vfdiv_vf_nxv4f64( %va, double %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfdiv_vf_nxv4f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, double %b, i32 0
@@ -709,7 +709,7 @@
 define @vfdiv_vv_nxv7f64( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfdiv_vv_nxv7f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fdiv.nxv7f64( %va, %b, %m, i32 %evl)
@@ -721,7 +721,7 @@
 define @vfdiv_vv_nxv8f64( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfdiv_vv_nxv8f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fdiv.nxv8f64( %va, %b, %m, i32 %evl)
@@ -743,7 +743,7 @@
 define @vfdiv_vf_nxv8f64( %va, double %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfdiv_vf_nxv8f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, double %b, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
@@ -9,7 +9,7 @@
 define @vfmax_vv_nxv1f16( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmax_vv_nxv1f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vfmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.maxnum.nxv1f16( %va, %vb, %m, i32 %evl)
@@ -33,7 +33,7 @@
 define @vfmax_vv_nxv2f16( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmax_vv_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.maxnum.nxv2f16( %va, %vb, %m, i32 %evl)
@@ -57,7 +57,7 @@
 define @vfmax_vv_nxv4f16( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmax_vv_nxv4f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vfmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.maxnum.nxv4f16( %va, %vb, %m, i32 %evl)
@@ -81,7 +81,7 @@
 define @vfmax_vv_nxv8f16( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmax_vv_nxv8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vfmax.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.maxnum.nxv8f16( %va, %vb, %m, i32 %evl)
@@ -105,7 +105,7 @@
 define @vfmax_vv_nxv16f16( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmax_vv_nxv16f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: vfmax.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.maxnum.nxv16f16( %va, %vb, %m, i32 %evl)
@@ -129,7 +129,7 @@
 define @vfmax_vv_nxv32f16( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmax_vv_nxv32f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT: vfmax.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.maxnum.nxv32f16( %va, %vb, %m, i32 %evl)
@@ -153,7 +153,7 @@
 define @vfmax_vv_nxv1f32( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmax_vv_nxv1f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vfmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.maxnum.nxv1f32( %va, %vb, %m, i32 %evl)
@@ -177,7 +177,7 @@
 define @vfmax_vv_nxv2f32( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmax_vv_nxv2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.maxnum.nxv2f32( %va, %vb, %m, i32 %evl)
@@ -201,7 +201,7 @@
 define @vfmax_vv_nxv4f32( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmax_vv_nxv4f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vfmax.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.maxnum.nxv4f32( %va, %vb, %m, i32 %evl)
@@ -225,7 +225,7 @@
 define @vfmax_vv_nxv8f32( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmax_vv_nxv8f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vfmax.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.maxnum.nxv8f32( %va, %vb, %m, i32 %evl)
@@ -249,7 +249,7 @@
 define @vfmax_vv_nxv16f32( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmax_vv_nxv16f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT: vfmax.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.maxnum.nxv16f32( %va, %vb, %m, i32 %evl)
@@ -273,7 +273,7 @@
 define @vfmax_vv_nxv1f64( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmax_vv_nxv1f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vfmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.maxnum.nxv1f64( %va, %vb, %m, i32 %evl)
@@ -297,7 +297,7 @@
 define @vfmax_vv_nxv2f64( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmax_vv_nxv2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vfmax.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.maxnum.nxv2f64( %va, %vb, %m, i32 %evl)
@@ -321,7 +321,7 @@
 define @vfmax_vv_nxv4f64( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmax_vv_nxv4f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vfmax.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.maxnum.nxv4f64( %va, %vb, %m, i32 %evl)
@@ -345,7 +345,7 @@
 define @vfmax_vv_nxv8f64( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmax_vv_nxv8f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vfmax.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.maxnum.nxv8f64( %va, %vb, %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
@@ -9,7 +9,7 @@
 define @vfmin_vv_nxv1f16( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmin_vv_nxv1f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vfmin.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.minnum.nxv1f16( %va, %vb, %m, i32 %evl)
@@ -33,7 +33,7 @@
 define @vfmin_vv_nxv2f16( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmin_vv_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfmin.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.minnum.nxv2f16( %va, %vb, %m, i32 %evl)
@@ -57,7 +57,7 @@
 define @vfmin_vv_nxv4f16( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmin_vv_nxv4f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vfmin.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.minnum.nxv4f16( %va, %vb, %m, i32 %evl)
@@ -81,7 +81,7 @@
 define @vfmin_vv_nxv8f16( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmin_vv_nxv8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vfmin.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.minnum.nxv8f16( %va, %vb, %m, i32 %evl)
@@ -105,7 +105,7 @@
 define @vfmin_vv_nxv16f16( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmin_vv_nxv16f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: vfmin.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.minnum.nxv16f16( %va, %vb, %m, i32 %evl)
@@ -129,7 +129,7 @@
 define @vfmin_vv_nxv32f16( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmin_vv_nxv32f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT: vfmin.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.minnum.nxv32f16( %va, %vb, %m, i32 %evl)
@@ -153,7 +153,7 @@
 define @vfmin_vv_nxv1f32( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmin_vv_nxv1f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vfmin.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.minnum.nxv1f32( %va, %vb, %m, i32 %evl)
@@ -177,7 +177,7 @@
 define @vfmin_vv_nxv2f32( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmin_vv_nxv2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfmin.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.minnum.nxv2f32( %va, %vb, %m, i32 %evl)
@@ -201,7 +201,7 @@
 define @vfmin_vv_nxv4f32( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmin_vv_nxv4f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vfmin.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.minnum.nxv4f32( %va, %vb, %m, i32 %evl)
@@ -225,7 +225,7 @@
 define @vfmin_vv_nxv8f32( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmin_vv_nxv8f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vfmin.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.minnum.nxv8f32( %va, %vb, %m, i32 %evl)
@@ -249,7 +249,7 @@
 define @vfmin_vv_nxv16f32( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmin_vv_nxv16f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT: vfmin.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.minnum.nxv16f32( %va, %vb, %m, i32 %evl)
@@ -273,7 +273,7 @@
 define @vfmin_vv_nxv1f64( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmin_vv_nxv1f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vfmin.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.minnum.nxv1f64( %va, %vb, %m, i32 %evl)
@@ -297,7 +297,7 @@
 define @vfmin_vv_nxv2f64( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmin_vv_nxv2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vfmin.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.minnum.nxv2f64( %va, %vb, %m, i32 %evl)
@@ -321,7 +321,7 @@
 define @vfmin_vv_nxv4f64( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmin_vv_nxv4f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vfmin.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.minnum.nxv4f64( %va, %vb, %m, i32 %evl)
@@ -345,7 +345,7 @@
 define @vfmin_vv_nxv8f64( %va, %vb, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmin_vv_nxv8f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vfmin.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.minnum.nxv8f64( %va, %vb, %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
@@ -9,7 +9,7 @@
 define @vfmul_vv_nxv1f16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmul_vv_nxv1f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fmul.nxv1f16( %va, %b, %m, i32 %evl)
@@ -31,7 +31,7 @@
 define @vfmul_vf_nxv1f16( %va, half %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmul_vf_nxv1f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
@@ -59,7 +59,7 @@
 define @vfmul_vv_nxv2f16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmul_vv_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fmul.nxv2f16( %va, %b, %m, i32 %evl)
@@ -81,7 +81,7 @@
 define @vfmul_vf_nxv2f16( %va, half %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmul_vf_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
@@ -109,7 +109,7 @@
 define @vfmul_vv_nxv4f16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmul_vv_nxv4f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fmul.nxv4f16( %va, %b, %m, i32 %evl)
@@ -131,7 +131,7 @@
 define @vfmul_vf_nxv4f16( %va, half %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmul_vf_nxv4f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
@@ -159,7 +159,7 @@
 define @vfmul_vv_nxv8f16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmul_vv_nxv8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fmul.nxv8f16( %va, %b, %m, i32 %evl)
@@ -181,7 +181,7 @@
 define @vfmul_vf_nxv8f16( %va, half %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmul_vf_nxv8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
@@ -209,7 +209,7 @@
 define @vfmul_vv_nxv16f16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmul_vv_nxv16f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: vfmul.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fmul.nxv16f16( %va, %b, %m, i32 %evl)
@@ -231,7 +231,7 @@
 define @vfmul_vf_nxv16f16( %va, half %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmul_vf_nxv16f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
@@ -259,7 +259,7 @@
 define @vfmul_vv_nxv32f16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmul_vv_nxv32f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fmul.nxv32f16( %va, %b, %m, i32 %evl)
@@ -281,7 +281,7 @@
 define @vfmul_vf_nxv32f16( %va, half %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmul_vf_nxv32f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
@@ -309,7 +309,7 @@
 define @vfmul_vv_nxv1f32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmul_vv_nxv1f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fmul.nxv1f32( %va, %b, %m, i32 %evl)
@@ -331,7 +331,7 @@
 define @vfmul_vf_nxv1f32( %va, float %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmul_vf_nxv1f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, float %b, i32 0
@@ -359,7 +359,7 @@
 define @vfmul_vv_nxv2f32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmul_vv_nxv2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fmul.nxv2f32( %va, %b, %m, i32 %evl)
@@ -381,7 +381,7 @@
 define @vfmul_vf_nxv2f32( %va, float %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmul_vf_nxv2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, float %b, i32 0
@@ -409,7 +409,7 @@
 define @vfmul_vv_nxv4f32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmul_vv_nxv4f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fmul.nxv4f32( %va, %b, %m, i32 %evl)
@@ -431,7 +431,7 @@
 define @vfmul_vf_nxv4f32( %va, float %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmul_vf_nxv4f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, float %b, i32 0
@@ -459,7 +459,7 @@
 define @vfmul_vv_nxv8f32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmul_vv_nxv8f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vfmul.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fmul.nxv8f32( %va, %b, %m, i32 %evl)
@@ -481,7 +481,7 @@
 define @vfmul_vf_nxv8f32( %va, float %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmul_vf_nxv8f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, float %b, i32 0
@@ -509,7 +509,7 @@
 define @vfmul_vv_nxv16f32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmul_vv_nxv16f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fmul.nxv16f32( %va, %b, %m, i32 %evl)
@@ -531,7 +531,7 @@
 define @vfmul_vf_nxv16f32( %va, float %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmul_vf_nxv16f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, float %b, i32 0
@@ -559,7 +559,7 @@
 define @vfmul_vv_nxv1f64( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmul_vv_nxv1f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fmul.nxv1f64( %va, %b, %m, i32 %evl)
@@ -581,7 +581,7 @@
 define @vfmul_vf_nxv1f64( %va, double %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmul_vf_nxv1f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, double %b, i32 0
@@ -609,7 +609,7 @@
 define @vfmul_vv_nxv2f64( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmul_vv_nxv2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fmul.nxv2f64( %va, %b, %m, i32 %evl)
@@ -631,7 +631,7 @@
 define @vfmul_vf_nxv2f64( %va, double %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmul_vf_nxv2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, double %b, i32 0
@@ -659,7 +659,7 @@
 define @vfmul_vv_nxv4f64( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmul_vv_nxv4f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vfmul.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fmul.nxv4f64( %va, %b, %m, i32 %evl)
@@ -681,7 +681,7 @@
 define @vfmul_vf_nxv4f64( %va, double %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmul_vf_nxv4f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, double %b, i32 0
@@ -709,7 +709,7 @@
 define @vfmul_vv_nxv7f64( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmul_vv_nxv7f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fmul.nxv7f64( %va, %b, %m, i32 %evl)
@@ -721,7 +721,7 @@
 define @vfmul_vv_nxv8f64( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmul_vv_nxv8f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fmul.nxv8f64( %va, %b, %m, i32 %evl)
@@ -743,7 +743,7 @@
 define @vfmul_vf_nxv8f64( %va, double %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfmul_vf_nxv8f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, double %b, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
@@ -9,7 +9,7 @@
 define @vfneg_vv_nxv1f16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfneg_vv_nxv1f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vfneg.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fneg.nxv1f16( %va, %m, i32 %evl)
@@ -33,7 +33,7 @@
 define @vfneg_vv_nxv2f16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfneg_vv_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfneg.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fneg.nxv2f16( %va, %m, i32 %evl)
@@ -57,7 +57,7 @@
 define @vfneg_vv_nxv4f16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfneg_vv_nxv4f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vfneg.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fneg.nxv4f16( %va, %m, i32 %evl)
@@ -81,7 +81,7 @@
 define @vfneg_vv_nxv8f16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfneg_vv_nxv8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vfneg.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fneg.nxv8f16( %va, %m, i32 %evl)
@@ -105,7 +105,7 @@
 define @vfneg_vv_nxv16f16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfneg_vv_nxv16f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: vfneg.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fneg.nxv16f16( %va, %m, i32 %evl)
@@ -129,7 +129,7 @@
 define @vfneg_vv_nxv32f16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfneg_vv_nxv32f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT: vfneg.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fneg.nxv32f16( %va, %m, i32 %evl)
@@ -153,7 +153,7 @@
 define @vfneg_vv_nxv1f32( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfneg_vv_nxv1f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vfneg.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fneg.nxv1f32( %va, %m, i32 %evl)
@@ -177,7 +177,7 @@
 define @vfneg_vv_nxv2f32( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfneg_vv_nxv2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfneg.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fneg.nxv2f32( %va, %m, i32 %evl)
@@ -201,7 +201,7 @@
 define @vfneg_vv_nxv4f32( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfneg_vv_nxv4f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vfneg.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fneg.nxv4f32( %va, %m, i32 %evl)
@@ -225,7 +225,7 @@
 define @vfneg_vv_nxv8f32( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfneg_vv_nxv8f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vfneg.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fneg.nxv8f32( %va, %m, i32 %evl)
@@ -249,7 +249,7 @@
 define @vfneg_vv_nxv16f32( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfneg_vv_nxv16f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT: vfneg.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fneg.nxv16f32( %va, %m, i32 %evl)
@@ -273,7 +273,7 @@
 define @vfneg_vv_nxv1f64( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfneg_vv_nxv1f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vfneg.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fneg.nxv1f64( %va, %m, i32 %evl)
@@ -297,7 +297,7 @@
 define @vfneg_vv_nxv2f64( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfneg_vv_nxv2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vfneg.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fneg.nxv2f64( %va, %m, i32 %evl)
@@ -321,7 +321,7 @@
 define @vfneg_vv_nxv4f64( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfneg_vv_nxv4f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vfneg.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fneg.nxv4f64( %va, %m, i32 %evl)
@@ -345,7 +345,7 @@
 define @vfneg_vv_nxv7f64( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfneg_vv_nxv7f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vfneg.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fneg.nxv7f64( %va, %m, i32 %evl)
@@ -369,7 +369,7 @@
 define @vfneg_vv_nxv8f64( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfneg_vv_nxv8f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vfneg.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fneg.nxv8f64( %va, %m, i32 %evl)
@@ -405,13 +405,13 @@
 ; CHECK-NEXT: # %bb.1:
 ; CHECK-NEXT: mv a2, a3
 ; CHECK-NEXT: .LBB32_2:
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; CHECK-NEXT: vfneg.v v16, v16, v0.t
 ; CHECK-NEXT: bltu a0, a1, .LBB32_4
 ; CHECK-NEXT: # %bb.3:
 ; CHECK-NEXT: mv a0, a1
 ; CHECK-NEXT: .LBB32_4:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v24
 ; CHECK-NEXT: vfneg.v v8, v8, v0.t
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll
@@ -7,7 +7,7 @@
 define @vfpext_nxv2f16_nxv2f32( %a, %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vfpext_nxv2f16_nxv2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfwcvt.f.f.v v9, v8, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v9
 ; CHECK-NEXT: ret
@@ -31,9 +31,9 @@
 define @vfpext_nxv2f16_nxv2f64( %a, %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vfpext_nxv2f16_nxv2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfwcvt.f.f.v v10, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT: vfwcvt.f.f.v v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fpext.nxv2f64.nxv2f16( %a, %m, i32 %vl)
@@ -57,7 +57,7 @@
 define @vfpext_nxv2f32_nxv2f64( %a, %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vfpext_nxv2f32_nxv2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfwcvt.f.f.v v10, v8, v0.t
 ; CHECK-NEXT: vmv2r.v v8, v10
 ; CHECK-NEXT: ret
@@ -81,7 +81,7 @@
 define @vfpext_nxv7f32_nxv7f64( %a, %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vfpext_nxv7f32_nxv7f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vfwcvt.f.f.v v16, v8, v0.t
 ; CHECK-NEXT: vmv8r.v v8, v16
 ; CHECK-NEXT: ret
@@ -106,13 +106,13 @@
 ; CHECK-NEXT: # %bb.1:
 ; CHECK-NEXT: mv a2, a3
 ; CHECK-NEXT: .LBB7_2:
-; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
 ; CHECK-NEXT: vfwcvt.f.f.v v16, v12, v0.t
 ; CHECK-NEXT: bltu a0, a1, .LBB7_4
 ; CHECK-NEXT: # %bb.3:
 ; CHECK-NEXT: mv a0, a1
 ; CHECK-NEXT: .LBB7_4:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v24
 ; CHECK-NEXT: vfwcvt.f.f.v v24, v8, v0.t
 ; CHECK-NEXT: vmv8r.v v8, v24
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll
@@ -7,9 +7,8 @@
 define @vfptosi_nxv2i1_nxv2f16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfptosi_nxv2i1_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT: vmsne.vi v0, v8, 0, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fptosi.nxv2i1.nxv2f16( %va, %m, i32 %evl)
@@ -32,9 +31,8 @@
 define @vfptosi_nxv2i1_nxv2f32( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfptosi_nxv2i1_nxv2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT: vmsne.vi v0, v8, 0, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fptosi.nxv2i1.nxv2f32( %va, %m, i32 %evl)
@@ -57,9 +55,8 @@
 define @vfptosi_nxv2i1_nxv2f64( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfptosi_nxv2i1_nxv2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT: vmsne.vi v8, v10, 0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v8
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
@@ -7,7 +7,7 @@
 define @vfptosi_v4i7_v4f16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfptosi_v4i7_v4f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v9
 ; CHECK-NEXT: ret
@@ -20,7 +20,7 @@
 define @vfptosi_nxv2i8_nxv2f16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfptosi_nxv2i8_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v9
 ; CHECK-NEXT: ret
@@ -44,7 +44,7 @@
 define @vfptosi_nxv2i16_nxv2f16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfptosi_nxv2i16_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fptosi.nxv2i16.nxv2f16( %va, %m, i32 %evl)
@@ -66,7 +66,7 @@
 define @vfptosi_nxv2i32_nxv2f16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfptosi_nxv2i32_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v9
 ; CHECK-NEXT: ret
@@ -90,9 +90,9 @@
 define @vfptosi_nxv2i64_nxv2f16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfptosi_nxv2i64_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfwcvt.f.f.v v10, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fptosi.nxv2i64.nxv2f16( %va, %m, i32 %evl)
@@ -116,9 +116,9 @@
 define @vfptosi_nxv2i8_nxv2f32( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfptosi_nxv2i8_nxv2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
 ; CHECK-NEXT: vnsrl.wi v8, v9, 0, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fptosi.nxv2i8.nxv2f32( %va, %m, i32 %evl)
@@ -142,7 +142,7 @@
 define @vfptosi_nxv2i16_nxv2f32( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfptosi_nxv2i16_nxv2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v9
 ; CHECK-NEXT: ret
@@ -166,7 +166,7 @@
 define @vfptosi_nxv2i32_nxv2f32( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfptosi_nxv2i32_nxv2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fptosi.nxv2i32.nxv2f32( %va, %m, i32 %evl)
@@ -188,7 +188,7 @@
 define @vfptosi_nxv2i64_nxv2f32( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfptosi_nxv2i64_nxv2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfwcvt.rtz.x.f.v v10, v8, v0.t
 ; CHECK-NEXT: vmv2r.v v8, v10
 ; CHECK-NEXT: ret
@@ -212,11 +212,11 @@
 define @vfptosi_nxv2i8_nxv2f64( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfptosi_nxv2i8_nxv2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
 ; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fptosi.nxv2i8.nxv2f64( %va, %m, i32 %evl)
@@ -242,9 +242,9 @@
 define @vfptosi_nxv2i16_nxv2f64( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfptosi_nxv2i16_nxv2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fptosi.nxv2i16.nxv2f64( %va, %m, i32 %evl)
@@ -268,7 +268,7 @@
 define @vfptosi_nxv2i32_nxv2f64( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfptosi_nxv2i32_nxv2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8, v0.t
 ; CHECK-NEXT: vmv.v.v v8, v10
 ; CHECK-NEXT: ret
@@ -292,7 +292,7 @@
 define @vfptosi_nxv2i64_nxv2f64( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfptosi_nxv2i64_nxv2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fptosi.nxv2i64.nxv2f64( %va, %m, i32 %evl)
@@ -333,13 +333,13 @@
 ; CHECK-NEXT: # %bb.1:
 ; CHECK-NEXT: mv a2, a3
 ; CHECK-NEXT: .LBB25_2:
-; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
 ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v16, v0.t
 ; CHECK-NEXT: bltu a0, a1, .LBB25_4
 ; CHECK-NEXT: # %bb.3:
 ; CHECK-NEXT: mv a0, a1
 ; CHECK-NEXT: .LBB25_4:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v24
 ; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
@@ -370,13 +370,13 @@
 ; CHECK-NEXT: # %bb.1:
 ; CHECK-NEXT: mv a2, a3
 ; CHECK-NEXT: .LBB26_2:
-; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
 ; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v16, v0.t
 ; CHECK-NEXT: bltu a0, a1, .LBB26_4
 ; CHECK-NEXT: # %bb.3:
 ; CHECK-NEXT: mv a0, a1
 ; CHECK-NEXT: .LBB26_4:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v24
 ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8, v0.t
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll
@@ -7,9 +7,8 @@
 define @vfptoui_nxv2i1_nxv2f16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfptoui_nxv2i1_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT: vmsne.vi v0, v8, 0, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fptoui.nxv2i1.nxv2f16( %va, %m, i32 %evl)
@@ -32,9 +31,8 @@
 define @vfptoui_nxv2i1_nxv2f32( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfptoui_nxv2i1_nxv2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT: vmsne.vi v0, v8, 0, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fptoui.nxv2i1.nxv2f32( %va, %m, i32 %evl)
@@ -57,9 +55,8 @@
 define @vfptoui_nxv2i1_nxv2f64( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfptoui_nxv2i1_nxv2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vfcvt.rtz.xu.f.v v10, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT: vmsne.vi v8, v10, 0, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v8
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
@@ -7,7 +7,7 @@
 define @vfptoui_v4i7_v4f16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfptoui_v4i7_v4f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v9
 ; CHECK-NEXT: ret
@@ -20,7 +20,7 @@
 define @vfptoui_nxv2i8_nxv2f16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfptoui_nxv2i8_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v9
 ; CHECK-NEXT: ret
@@ -44,7 +44,7 @@
 define @vfptoui_nxv2i16_nxv2f16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfptoui_nxv2i16_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fptoui.nxv2i16.nxv2f16( %va, %m, i32 %evl)
@@ -66,7 +66,7 @@
 define @vfptoui_nxv2i32_nxv2f16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfptoui_nxv2i32_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v9, v8, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v9
 ; CHECK-NEXT: ret
@@ -90,9 +90,9 @@
 define @vfptoui_nxv2i64_nxv2f16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfptoui_nxv2i64_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfwcvt.f.f.v v10, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fptoui.nxv2i64.nxv2f16( %va, %m, i32 %evl)
@@ -116,9 +116,9 @@
 define @vfptoui_nxv2i8_nxv2f32( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfptoui_nxv2i8_nxv2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
 ; CHECK-NEXT: vnsrl.wi v8, v9, 0, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fptoui.nxv2i8.nxv2f32( %va, %m, i32 %evl)
@@ -142,7 +142,7 @@
 define @vfptoui_nxv2i16_nxv2f32( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfptoui_nxv2i16_nxv2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v9
 ; CHECK-NEXT: ret
@@ -166,7 +166,7 @@
 define @vfptoui_nxv2i32_nxv2f32( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfptoui_nxv2i32_nxv2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fptoui.nxv2i32.nxv2f32( %va, %m, i32 %evl)
@@ -188,7 +188,7 @@
 define @vfptoui_nxv2i64_nxv2f32( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfptoui_nxv2i64_nxv2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v10, v8, v0.t
 ; CHECK-NEXT: vmv2r.v v8, v10
 ; CHECK-NEXT: ret
@@ -212,11 +212,11 @@
 define @vfptoui_nxv2i8_nxv2f64( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfptoui_nxv2i8_nxv2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
 ; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fptoui.nxv2i8.nxv2f64( %va, %m, i32 %evl)
@@ -242,9 +242,9 @@
 define @vfptoui_nxv2i16_nxv2f64( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfptoui_nxv2i16_nxv2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fptoui.nxv2i16.nxv2f64( %va, %m, i32 %evl)
@@ -268,7 +268,7 @@
 define @vfptoui_nxv2i32_nxv2f64( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfptoui_nxv2i32_nxv2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8, v0.t
 ; CHECK-NEXT: vmv.v.v v8, v10
 ; CHECK-NEXT: ret
@@ -292,7 +292,7 @@
 define @vfptoui_nxv2i64_nxv2f64( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfptoui_nxv2i64_nxv2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fptoui.nxv2i64.nxv2f64( %va, %m, i32 %evl)
@@ -333,13 +333,13 @@
 ; CHECK-NEXT: # %bb.1:
 ; CHECK-NEXT: mv a2, a3
 ; CHECK-NEXT: .LBB25_2:
-; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
 ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v16, v0.t
 ; CHECK-NEXT: bltu a0, a1, .LBB25_4
 ; CHECK-NEXT: # %bb.3:
 ; CHECK-NEXT: mv a0, a1
 ; CHECK-NEXT: .LBB25_4:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v24
 ; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
@@ -370,13 +370,13 @@
 ; CHECK-NEXT: # %bb.1:
 ; CHECK-NEXT: mv a2, a3
 ; CHECK-NEXT: .LBB26_2:
-; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
 ; CHECK-NEXT: vfcvt.rtz.xu.f.v v16, v16, v0.t
 ; CHECK-NEXT: bltu a0, a1, .LBB26_4
 ; CHECK-NEXT: # %bb.3:
 ; CHECK-NEXT: mv a0, a1
 ; CHECK-NEXT: .LBB26_4:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v24
 ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8, v0.t
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
@@ -7,7 +7,7 @@
 define @vfptrunc_nxv2f16_nxv2f32( %a, %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vfptrunc_nxv2f16_nxv2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfncvt.f.f.w v9, v8, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v9
 ; CHECK-NEXT: ret
@@ -31,9 +31,9 @@
 define @vfptrunc_nxv2f16_nxv2f64( %a, %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vfptrunc_nxv2f16_nxv2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfncvt.rod.f.f.w v10, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT: vfncvt.f.f.w v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fptrunc.nxv2f16.nxv2f64( %a, %m, i32 %vl)
@@ -57,7 +57,7 @@
 define @vfptrunc_nxv2f32_nxv2f64( %a, %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vfptrunc_nxv2f32_nxv2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfncvt.f.f.w v10, v8, v0.t
 ; CHECK-NEXT: vmv.v.v v8, v10
 ; CHECK-NEXT: ret
@@ -81,7 +81,7 @@
 define @vfptrunc_nxv7f32_nxv7f64( %a, %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vfptrunc_nxv7f32_nxv7f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vfncvt.f.f.w v16, v8, v0.t
 ; CHECK-NEXT: vmv.v.v v8, v16
 ; CHECK-NEXT: ret
@@ -112,13 +112,13 @@
 ; CHECK-NEXT: # %bb.1:
 ; CHECK-NEXT: mv a2, a3
 ; CHECK-NEXT: .LBB7_2:
-; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
 ; CHECK-NEXT: vfncvt.f.f.w v12, v16, v0.t
 ; CHECK-NEXT: bltu a0, a1, .LBB7_4
 ; CHECK-NEXT: # %bb.3:
 ; CHECK-NEXT: mv a0, a1
 ; CHECK-NEXT: .LBB7_4:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v24
 ; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
@@ -166,7 +166,7 @@
 ; CHECK-NEXT: .LBB8_4:
 ; CHECK-NEXT: srli a7, a1, 2
 ; CHECK-NEXT: slli t0, a1, 3
-; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, ma
 ; CHECK-NEXT: vfncvt.f.f.w v12, v16, v0.t
 ; CHECK-NEXT: bltu a5, a1, .LBB8_6
 ; CHECK-NEXT: # %bb.5:
@@ -176,7 +176,7 @@
 ; CHECK-NEXT: vsetvli t1, zero, e8, mf2, ta, ma
 ; CHECK-NEXT: vslidedown.vx v1, v24, a7
 ; CHECK-NEXT: add a7, a0, t0
-; CHECK-NEXT: vsetvli zero, a5, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a5, e32, m4, ta, ma
 ; CHECK-NEXT: sub a4, a2, a4
 ; CHECK-NEXT: vmv1r.v v0, v24
 ; CHECK-NEXT: csrr a5, vlenb
@@ -206,7 +206,7 @@
 ; CHECK-NEXT: vl8re64.v v16, (a0)
 ; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add a0, sp, a0
@@ -217,7 +217,7 @@
 ; CHECK-NEXT: # %bb.11:
 ; CHECK-NEXT: mv a6, a1
 ; CHECK-NEXT: .LBB8_12:
-; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v1
 ; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-vp.ll
b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-vp.ll @@ -9,7 +9,7 @@ define @vfrdiv_vf_nxv1f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -37,7 +37,7 @@ define @vfrdiv_vf_nxv2f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -65,7 +65,7 @@ define @vfrdiv_vf_nxv4f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -93,7 +93,7 @@ define @vfrdiv_vf_nxv8f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -121,7 +121,7 @@ define @vfrdiv_vf_nxv16f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -149,7 +149,7 @@ define @vfrdiv_vf_nxv32f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -177,7 +177,7 @@ define @vfrdiv_vf_nxv1f32( %va, float %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -205,7 +205,7 @@ define @vfrdiv_vf_nxv2f32( %va, float %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -233,7 +233,7 @@ define @vfrdiv_vf_nxv4f32( %va, float %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -261,7 +261,7 @@ define @vfrdiv_vf_nxv8f32( %va, float %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t ; CHECK-NEXT: ret %elt.head = 
@@ -289,7 +289,7 @@
 define @vfrdiv_vf_nxv16f32( %va, float %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfrdiv_vf_nxv16f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, float %b, i32 0
@@ -317,7 +317,7 @@
 define @vfrdiv_vf_nxv1f64( %va, double %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfrdiv_vf_nxv1f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, double %b, i32 0
@@ -345,7 +345,7 @@
 define @vfrdiv_vf_nxv2f64( %va, double %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfrdiv_vf_nxv2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, double %b, i32 0
@@ -373,7 +373,7 @@
 define @vfrdiv_vf_nxv4f64( %va, double %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfrdiv_vf_nxv4f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, double %b, i32 0
@@ -401,7 +401,7 @@
 define @vfrdiv_vf_nxv8f64( %va, double %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfrdiv_vf_nxv8f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, double %b, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfrsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub-vp.ll
@@ -9,7 +9,7 @@
 define @vfrsub_vf_nxv1f16( %va, half %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfrsub_vf_nxv1f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
@@ -37,7 +37,7 @@
 define @vfrsub_vf_nxv2f16( %va, half %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfrsub_vf_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
@@ -65,7 +65,7 @@
 define @vfrsub_vf_nxv4f16( %va, half %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfrsub_vf_nxv4f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
@@ -93,7 +93,7 @@
 define @vfrsub_vf_nxv8f16( %va, half %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfrsub_vf_nxv8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
@@ -121,7 +121,7 @@
 define @vfrsub_vf_nxv16f16( %va, half %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfrsub_vf_nxv16f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
@@ -149,7 +149,7 @@
 define @vfrsub_vf_nxv32f16( %va, half %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfrsub_vf_nxv32f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
@@ -177,7 +177,7 @@
 define @vfrsub_vf_nxv1f32( %va, float %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfrsub_vf_nxv1f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, float %b, i32 0
@@ -205,7 +205,7 @@
 define @vfrsub_vf_nxv2f32( %va, float %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfrsub_vf_nxv2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, float %b, i32 0
@@ -233,7 +233,7 @@
 define @vfrsub_vf_nxv4f32( %va, float %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfrsub_vf_nxv4f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, float %b, i32 0
@@ -261,7 +261,7 @@
 define @vfrsub_vf_nxv8f32( %va, float %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfrsub_vf_nxv8f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, float %b, i32 0
@@ -289,7 +289,7 @@
 define @vfrsub_vf_nxv16f32( %va, float %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfrsub_vf_nxv16f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, float %b, i32 0
@@ -317,7 +317,7 @@
 define @vfrsub_vf_nxv1f64( %va, double %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfrsub_vf_nxv1f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, double %b, i32 0
@@ -345,7 +345,7 @@
 define @vfrsub_vf_nxv2f64( %va, double %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfrsub_vf_nxv2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, double %b, i32 0
@@ -373,7 +373,7 @@
 define @vfrsub_vf_nxv4f64( %va, double %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfrsub_vf_nxv4f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, double %b, i32 0
@@ -401,7 +401,7 @@
 define @vfrsub_vf_nxv8f64( %va, double %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfrsub_vf_nxv8f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, double %b, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll
@@ -9,7 +9,7 @@
 define @vfsqrt_vv_nxv1f16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsqrt_vv_nxv1f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vfsqrt.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sqrt.nxv1f16( %va, %m, i32 %evl)
@@ -33,7 +33,7 @@
 define @vfsqrt_vv_nxv2f16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsqrt_vv_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfsqrt.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sqrt.nxv2f16( %va, %m, i32 %evl)
@@ -57,7 +57,7 @@
 define @vfsqrt_vv_nxv4f16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsqrt_vv_nxv4f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vfsqrt.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sqrt.nxv4f16( %va, %m, i32 %evl)
@@ -81,7 +81,7 @@
 define @vfsqrt_vv_nxv8f16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsqrt_vv_nxv8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vfsqrt.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sqrt.nxv8f16( %va, %m, i32 %evl)
@@ -105,7 +105,7 @@
 define @vfsqrt_vv_nxv16f16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsqrt_vv_nxv16f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: vfsqrt.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sqrt.nxv16f16( %va, %m, i32 %evl)
@@ -129,7 +129,7 @@
 define @vfsqrt_vv_nxv32f16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsqrt_vv_nxv32f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT: vfsqrt.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sqrt.nxv32f16( %va, %m, i32 %evl)
@@ -153,7 +153,7 @@
 define @vfsqrt_vv_nxv1f32( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsqrt_vv_nxv1f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vfsqrt.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sqrt.nxv1f32( %va, %m, i32 %evl)
@@ -177,7 +177,7 @@
 define @vfsqrt_vv_nxv2f32( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsqrt_vv_nxv2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfsqrt.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sqrt.nxv2f32( %va, %m, i32 %evl)
@@ -201,7 +201,7 @@
 define @vfsqrt_vv_nxv4f32( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsqrt_vv_nxv4f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vfsqrt.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sqrt.nxv4f32( %va, %m, i32 %evl)
@@ -225,7 +225,7 @@
 define @vfsqrt_vv_nxv8f32( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsqrt_vv_nxv8f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vfsqrt.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sqrt.nxv8f32( %va, %m, i32 %evl)
@@ -249,7 +249,7 @@
 define @vfsqrt_vv_nxv16f32( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsqrt_vv_nxv16f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT: vfsqrt.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sqrt.nxv16f32( %va, %m, i32 %evl)
@@ -273,7 +273,7 @@
 define @vfsqrt_vv_nxv1f64( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsqrt_vv_nxv1f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vfsqrt.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sqrt.nxv1f64( %va, %m, i32 %evl)
@@ -297,7 +297,7 @@
 define @vfsqrt_vv_nxv2f64( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsqrt_vv_nxv2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vfsqrt.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sqrt.nxv2f64( %va, %m, i32 %evl)
@@ -321,7 +321,7 @@
 define @vfsqrt_vv_nxv4f64( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsqrt_vv_nxv4f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vfsqrt.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sqrt.nxv4f64( %va, %m, i32 %evl)
@@ -345,7 +345,7 @@
 define @vfsqrt_vv_nxv7f64( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsqrt_vv_nxv7f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vfsqrt.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sqrt.nxv7f64( %va, %m, i32 %evl)
@@ -369,7 +369,7 @@
 define @vfsqrt_vv_nxv8f64( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsqrt_vv_nxv8f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vfsqrt.v v8, v8, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.sqrt.nxv8f64( %va, %m, i32 %evl)
@@ -405,13 +405,13 @@
 ; CHECK-NEXT: # %bb.1:
 ; CHECK-NEXT: mv a2, a3
 ; CHECK-NEXT: .LBB32_2:
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; CHECK-NEXT: vfsqrt.v v16, v16, v0.t
 ; CHECK-NEXT: bltu a0, a1, .LBB32_4
 ; CHECK-NEXT: # %bb.3:
 ; CHECK-NEXT: mv a0, a1
 ; CHECK-NEXT: .LBB32_4:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v24
 ; CHECK-NEXT: vfsqrt.v v8, v8, v0.t
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
@@ -9,7 +9,7 @@
 define @vfsub_vv_nxv1f16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsub_vv_nxv1f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fsub.nxv1f16( %va, %b, %m, i32 %evl)
@@ -31,7 +31,7 @@
 define @vfsub_vf_nxv1f16( %va, half %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsub_vf_nxv1f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
@@ -59,7 +59,7 @@
 define @vfsub_vv_nxv2f16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsub_vv_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fsub.nxv2f16( %va, %b, %m, i32 %evl)
@@ -81,7 +81,7 @@
 define @vfsub_vf_nxv2f16( %va, half %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsub_vf_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
@@ -109,7 +109,7 @@
 define @vfsub_vv_nxv4f16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsub_vv_nxv4f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fsub.nxv4f16( %va, %b, %m, i32 %evl)
@@ -131,7 +131,7 @@
 define @vfsub_vf_nxv4f16( %va, half %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsub_vf_nxv4f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
@@ -159,7 +159,7 @@
 define @vfsub_vv_nxv8f16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsub_vv_nxv8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fsub.nxv8f16( %va, %b, %m, i32 %evl)
@@ -181,7 +181,7 @@
 define @vfsub_vf_nxv8f16( %va, half %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsub_vf_nxv8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
@@ -209,7 +209,7 @@
 define @vfsub_vv_nxv16f16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsub_vv_nxv16f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: vfsub.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fsub.nxv16f16( %va, %b, %m, i32 %evl)
@@ -231,7 +231,7 @@
 define @vfsub_vf_nxv16f16( %va, half %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsub_vf_nxv16f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
@@ -259,7 +259,7 @@
 define @vfsub_vv_nxv32f16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsub_vv_nxv32f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fsub.nxv32f16( %va, %b, %m, i32 %evl)
@@ -281,7 +281,7 @@
 define @vfsub_vf_nxv32f16( %va, half %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsub_vf_nxv32f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
@@ -309,7 +309,7 @@
 define @vfsub_vv_nxv1f32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsub_vv_nxv1f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fsub.nxv1f32( %va, %b, %m, i32 %evl)
@@ -331,7 +331,7 @@
 define @vfsub_vf_nxv1f32( %va, float %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsub_vf_nxv1f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, float %b, i32 0
@@ -359,7 +359,7 @@
 define @vfsub_vv_nxv2f32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsub_vv_nxv2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fsub.nxv2f32( %va, %b, %m, i32 %evl)
@@ -381,7 +381,7 @@
 define @vfsub_vf_nxv2f32( %va, float %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsub_vf_nxv2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, float %b, i32 0
@@ -409,7 +409,7 @@
 define @vfsub_vv_nxv4f32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsub_vv_nxv4f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fsub.nxv4f32( %va, %b, %m, i32 %evl)
@@ -431,7 +431,7 @@
 define @vfsub_vf_nxv4f32( %va, float %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsub_vf_nxv4f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, float %b, i32 0
@@ -459,7 +459,7 @@
 define @vfsub_vv_nxv8f32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsub_vv_nxv8f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vfsub.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fsub.nxv8f32( %va, %b, %m, i32 %evl)
@@ -481,7 +481,7 @@
 define @vfsub_vf_nxv8f32( %va, float %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsub_vf_nxv8f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, float %b, i32 0
@@ -509,7 +509,7 @@
 define @vfsub_vv_nxv16f32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsub_vv_nxv16f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fsub.nxv16f32( %va, %b, %m, i32 %evl)
@@ -531,7 +531,7 @@
 define @vfsub_vf_nxv16f32( %va, float %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsub_vf_nxv16f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, float %b, i32 0
@@ -559,7 +559,7 @@
 define @vfsub_vv_nxv1f64( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsub_vv_nxv1f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fsub.nxv1f64( %va, %b, %m, i32 %evl)
@@ -581,7 +581,7 @@
 define @vfsub_vf_nxv1f64( %va, double %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsub_vf_nxv1f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, double %b, i32 0
@@ -609,7 +609,7 @@
 define @vfsub_vv_nxv2f64( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsub_vv_nxv2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fsub.nxv2f64( %va, %b, %m, i32 %evl)
@@ -631,7 +631,7 @@
 define @vfsub_vf_nxv2f64( %va, double %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsub_vf_nxv2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, double %b, i32 0
@@ -659,7 +659,7 @@
 define @vfsub_vv_nxv4f64( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsub_vv_nxv4f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vfsub.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fsub.nxv4f64( %va, %b, %m, i32 %evl)
@@ -681,7 +681,7 @@
 define @vfsub_vf_nxv4f64( %va, double %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsub_vf_nxv4f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, double %b, i32 0
@@ -709,7 +709,7 @@
 define @vfsub_vv_nxv7f64( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsub_vv_nxv7f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fsub.nxv7f64( %va, %b, %m, i32 %evl)
@@ -721,7 +721,7 @@
 define @vfsub_vv_nxv8f64( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsub_vv_nxv8f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fsub.nxv8f64( %va, %b, %m, i32 %evl)
@@ -743,7 +743,7 @@
 define @vfsub_vf_nxv8f64( %va, double %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfsub_vf_nxv8f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, double %b, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll
@@ -9,7 +9,7 @@
 define @vmul_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vx_nxv8i7:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i7 %b, i32 0
@@ -23,7 +23,7 @@
 define @vmul_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vv_nxv1i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.mul.nxv1i8( %va, %b, %m, i32 %evl)
@@ -45,7 +45,7 @@
 define @vmul_vx_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vx_nxv1i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i8 %b, i32 0
@@ -73,7 +73,7 @@
 define @vmul_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vv_nxv2i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.mul.nxv2i8( %va, %b, %m, i32 %evl)
@@ -95,7 +95,7 @@
 define @vmul_vx_nxv2i8( %va, i8 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vx_nxv2i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i8 %b, i32 0
@@ -123,7 +123,7 @@
 define @vmul_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vv_nxv4i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.mul.nxv4i8( %va, %b, %m, i32 %evl)
@@ -145,7 +145,7 @@
 define @vmul_vx_nxv4i8( %va, i8 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vx_nxv4i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i8 %b, i32 0
@@ -173,7 +173,7 @@
 define @vmul_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vv_nxv8i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.mul.nxv8i8( %va, %b, %m, i32 %evl)
@@ -195,7 +195,7 @@
 define @vmul_vx_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vx_nxv8i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i8 %b, i32 0
@@ -223,7 +223,7 @@
 define @vmul_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vv_nxv16i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT: vmul.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.mul.nxv16i8( %va, %b, %m, i32 %evl)
@@ -245,7 +245,7 @@
 define @vmul_vx_nxv16i8( %va, i8 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vx_nxv16i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i8 %b, i32 0
@@ -273,7 +273,7 @@
 define @vmul_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vv_nxv32i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT: vmul.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.mul.nxv32i8( %va, %b, %m, i32 %evl)
@@ -295,7 +295,7 @@
 define @vmul_vx_nxv32i8( %va, i8 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vx_nxv32i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i8 %b, i32 0
@@ -323,7 +323,7 @@
 define @vmul_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vv_nxv64i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT: vmul.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.mul.nxv64i8( %va, %b, %m, i32 %evl)
@@ -345,7 +345,7 @@
 define @vmul_vx_nxv64i8( %va, i8 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vx_nxv64i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i8 %b, i32 0
@@ -373,7 +373,7 @@
 define @vmul_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vv_nxv1i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.mul.nxv1i16( %va, %b, %m, i32 %evl)
@@ -395,7 +395,7 @@
 define @vmul_vx_nxv1i16( %va, i16 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vx_nxv1i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i16 %b, i32 0
@@ -423,7 +423,7 @@
 define @vmul_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vv_nxv2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.mul.nxv2i16( %va, %b, %m, i32 %evl)
@@ -445,7 +445,7 @@
 define @vmul_vx_nxv2i16( %va, i16 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vx_nxv2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i16 %b, i32 0
@@ -473,7 +473,7 @@
 define @vmul_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vv_nxv4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.mul.nxv4i16( %va, %b, %m, i32 %evl)
@@ -495,7 +495,7 @@
 define @vmul_vx_nxv4i16( %va, i16 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vx_nxv4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i16 %b, i32 0
@@ -523,7 +523,7 @@
 define @vmul_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vv_nxv8i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vmul.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.mul.nxv8i16( %va, %b, %m, i32 %evl)
@@ -545,7 +545,7 @@
 define @vmul_vx_nxv8i16( %va, i16 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vx_nxv8i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i16 %b, i32 0
@@ -573,7 +573,7 @@
 define @vmul_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vv_nxv16i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: vmul.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.mul.nxv16i16( %va, %b, %m, i32 %evl)
@@ -595,7 +595,7 @@
 define @vmul_vx_nxv16i16( %va, i16 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vx_nxv16i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i16 %b, i32 0
@@ -623,7 +623,7 @@
 define @vmul_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vv_nxv32i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT: vmul.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.mul.nxv32i16( %va, %b, %m, i32 %evl)
@@ -645,7 +645,7 @@
 define @vmul_vx_nxv32i16( %va, i16 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vx_nxv32i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i16 %b, i32 0
@@ -673,7 +673,7 @@
 define @vmul_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vv_nxv1i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.mul.nxv1i32( %va, %b, %m, i32 %evl)
@@ -695,7 +695,7 @@
 define @vmul_vx_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vx_nxv1i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i32 %b, i32 0
@@ -723,7 +723,7 @@
 define @vmul_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vv_nxv2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.mul.nxv2i32( %va, %b, %m, i32 %evl)
@@ -745,7 +745,7 @@
 define @vmul_vx_nxv2i32( %va, i32 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vx_nxv2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i32 %b, i32 0
@@ -773,7 +773,7 @@
 define @vmul_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vv_nxv4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vmul.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.mul.nxv4i32( %va, %b, %m, i32 %evl)
@@ -795,7 +795,7 @@
 define @vmul_vx_nxv4i32( %va, i32 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vx_nxv4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i32 %b, i32 0
@@ -823,7 +823,7 @@
 define @vmul_vv_nxv7i32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vv_nxv7i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vmul.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.mul.nxv7i32( %va, %b, %m, i32 %evl)
@@ -845,7 +845,7 @@
 define @vmul_vx_nxv7i32( %va, i32 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vx_nxv7i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i32 %b, i32 0
@@ -873,7 +873,7 @@
 define @vmul_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vv_nxv8i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vmul.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.mul.nxv8i32( %va, %b, %m, i32 %evl)
@@ -895,7 +895,7 @@
 define @vmul_vx_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vx_nxv8i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i32 %b, i32 0
@@ -923,7 +923,7 @@
 define @vmul_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vv_nxv16i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT: vmul.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.mul.nxv16i32( %va, %b, %m, i32 %evl)
@@ -945,7 +945,7 @@
 define @vmul_vx_nxv16i32( %va, i32 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vx_nxv16i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i32 %b, i32 0
@@ -957,7 +957,7 @@
 define @vmul_vx_nxv16i32_commute( %va, i32 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vx_nxv16i32_commute:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i32 %b, i32 0
@@ -985,7 +985,7 @@
 define @vmul_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vv_nxv1i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.mul.nxv1i64( %va, %b, %m, i32 %evl)
@@ -1014,14 +1014,14 @@
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
 ; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT: vmul.vv v8, v8, v9, v0.t
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vmul_vx_nxv1i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
 ; RV64-NEXT: ret
 %elt.head = insertelement poison, i64 %b, i32 0
@@ -1063,7 +1063,7 @@
 define @vmul_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vv_nxv2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vmul.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.mul.nxv2i64( %va, %b, %m, i32 %evl)
@@ -1092,14 +1092,14 @@
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
 ; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT: vmul.vv v8, v8, v10, v0.t
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vmul_vx_nxv2i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
 ; RV64-NEXT: ret
 %elt.head = insertelement poison, i64 %b, i32 0
@@ -1141,7 +1141,7 @@
 define @vmul_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vv_nxv4i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vmul.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.mul.nxv4i64( %va, %b, %m, i32 %evl)
@@ -1170,14 +1170,14 @@
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
 ; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT: vmul.vv v8, v8, v12, v0.t
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vmul_vx_nxv4i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
 ; RV64-NEXT: ret
 %elt.head = insertelement poison, i64 %b, i32 0
@@ -1219,7 +1219,7 @@
 define @vmul_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vmul_vv_nxv8i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vmul.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.mul.nxv8i64( %va, %b, %m, i32 %evl)
@@ -1248,14 +1248,14 @@
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
 ; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
 ; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vmul_vx_nxv8i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
 ; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
 ; RV64-NEXT: ret
 %elt.head = insertelement poison, i64 %b, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll
@@ -9,7 +9,7 @@
 define @vor_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vx_nxv8i7:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i7 %b, i32 0
@@ -23,7 +23,7 @@
 define @vor_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vv_nxv1i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.or.nxv1i8( %va, %b, %m, i32 %evl)
@@ -45,7 +45,7 @@
 define @vor_vx_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vx_nxv1i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i8 %b, i32 0
@@ -71,7 +71,7 @@
 define @vor_vi_nxv1i8( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vi_nxv1i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i8 5, i32 0
@@ -99,7 +99,7 @@
 define @vor_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vv_nxv2i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.or.nxv2i8( %va, %b, %m, i32 %evl)
@@ -121,7 +121,7 @@
 define @vor_vx_nxv2i8( %va, i8 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vx_nxv2i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i8 %b, i32 0
@@ -147,7 +147,7 @@
 define @vor_vi_nxv2i8( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vi_nxv2i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i8 5, i32 0
@@ -175,7 +175,7 @@
 define @vor_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vv_nxv4i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.or.nxv4i8( %va, %b, %m, i32 %evl)
@@ -197,7 +197,7 @@
 define @vor_vx_nxv4i8( %va, i8 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vx_nxv4i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i8 %b, i32 0
@@ -223,7 +223,7 @@
 define @vor_vi_nxv4i8( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vi_nxv4i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i8 5, i32 0
@@ -251,7 +251,7 @@
 define @vor_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vv_nxv8i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.or.nxv8i8( %va, %b, %m, i32 %evl)
@@ -273,7 +273,7 @@
 define @vor_vx_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vx_nxv8i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i8 %b, i32 0
@@ -299,7 +299,7 @@
 define @vor_vi_nxv8i8( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vi_nxv8i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i8 5, i32 0
@@ -327,7 +327,7 @@
 define @vor_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vv_nxv16i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT: vor.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.or.nxv16i8( %va, %b, %m, i32 %evl)
@@ -349,7 +349,7 @@
 define @vor_vx_nxv16i8( %va, i8 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vx_nxv16i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i8 %b, i32 0
@@ -375,7 +375,7 @@
 define @vor_vi_nxv16i8( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vi_nxv16i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i8 5, i32 0
@@ -403,7 +403,7 @@
 define @vor_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vv_nxv32i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT: vor.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.or.nxv32i8( %va, %b, %m, i32 %evl)
@@ -425,7 +425,7 @@
 define @vor_vx_nxv32i8( %va, i8 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vx_nxv32i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i8 %b, i32 0
@@ -451,7 +451,7 @@
 define @vor_vi_nxv32i8( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vi_nxv32i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i8 5, i32 0
@@ -479,7 +479,7 @@
 define @vor_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vv_nxv64i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.or.nxv64i8( %va, %b, %m, i32 %evl)
@@ -501,7 +501,7 @@
 define @vor_vx_nxv64i8( %va, i8 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vx_nxv64i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i8 %b, i32 0
@@ -527,7 +527,7 @@
 define @vor_vi_nxv64i8( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vi_nxv64i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i8 5, i32 0
@@ -555,7 +555,7 @@
 define @vor_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vv_nxv1i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.or.nxv1i16( %va, %b, %m, i32 %evl)
@@ -577,7 +577,7 @@
 define @vor_vx_nxv1i16( %va, i16 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vx_nxv1i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i16 %b, i32 0
@@ -603,7 +603,7 @@
 define @vor_vi_nxv1i16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vi_nxv1i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i16 5, i32 0
@@ -631,7 +631,7 @@
 define @vor_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vv_nxv2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.or.nxv2i16( %va, %b, %m, i32 %evl)
@@ -653,7 +653,7 @@
 define @vor_vx_nxv2i16( %va, i16 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vx_nxv2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i16 %b, i32 0
@@ -679,7 +679,7 @@
 define @vor_vi_nxv2i16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vi_nxv2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i16 5, i32 0
@@ -707,7 +707,7 @@
 define @vor_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vv_nxv4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.or.nxv4i16( %va, %b, %m, i32 %evl)
@@ -729,7 +729,7 @@
 define @vor_vx_nxv4i16( %va, i16 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vx_nxv4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i16 %b, i32 0
@@ -755,7 +755,7 @@
 define @vor_vi_nxv4i16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vi_nxv4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i16 5, i32 0
@@ -783,7 +783,7 @@
 define @vor_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vv_nxv8i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vor.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.or.nxv8i16( %va, %b, %m, i32 %evl)
@@ -805,7 +805,7 @@
 define @vor_vx_nxv8i16( %va, i16 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vx_nxv8i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i16 %b, i32 0
@@ -831,7 +831,7 @@
 define @vor_vi_nxv8i16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vi_nxv8i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i16 5, i32 0
@@ -859,7 +859,7 @@
 define @vor_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vv_nxv16i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: vor.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.or.nxv16i16( %va, %b, %m, i32 %evl)
@@ -881,7 +881,7 @@
 define @vor_vx_nxv16i16( %va, i16 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vx_nxv16i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i16 %b, i32 0
@@ -907,7 +907,7 @@
 define @vor_vi_nxv16i16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vi_nxv16i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i16 5, i32 0
@@ -935,7 +935,7 @@
 define @vor_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vv_nxv32i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.or.nxv32i16( %va, %b, %m, i32 %evl)
@@ -957,7 +957,7 @@
 define @vor_vx_nxv32i16( %va, i16 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vx_nxv32i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i16 %b, i32 0
@@ -983,7 +983,7 @@
 define @vor_vi_nxv32i16( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vi_nxv32i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i16 5, i32 0
@@ -1011,7 +1011,7 @@
 define @vor_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vv_nxv1i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.or.nxv1i32( %va, %b, %m, i32 %evl)
@@ -1033,7 +1033,7 @@
 define @vor_vx_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vx_nxv1i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i32 %b, i32 0
@@ -1059,7 +1059,7 @@
 define @vor_vi_nxv1i32( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vi_nxv1i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i32 5, i32 0
@@ -1087,7 +1087,7 @@
 define @vor_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vv_nxv2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.or.nxv2i32( %va, %b, %m, i32 %evl)
@@ -1109,7 +1109,7 @@
 define @vor_vx_nxv2i32( %va, i32 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vx_nxv2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i32 %b, i32 0
@@ -1121,7 +1121,7 @@
 define @vor_vx_nxv2i32_commute( %va, i32 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vx_nxv2i32_commute:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i32 %b, i32 0
@@ -1161,7 +1161,7 @@
 define @vor_vi_nxv2i32( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vi_nxv2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i32 5, i32 0
@@ -1189,7 +1189,7 @@
 define @vor_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vv_nxv4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vor.vv v8, v8, v10, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.or.nxv4i32( %va, %b, %m, i32 %evl)
@@ -1211,7 +1211,7 @@
 define @vor_vx_nxv4i32( %va, i32 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vx_nxv4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i32 %b, i32 0
@@ -1237,7 +1237,7 @@
 define @vor_vi_nxv4i32( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vi_nxv4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i32 5, i32 0
@@ -1265,7 +1265,7 @@
 define @vor_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vv_nxv8i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vor.vv v8, v8, v12, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.or.nxv8i32( %va, %b, %m, i32 %evl)
@@ -1287,7 +1287,7 @@
 define @vor_vx_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vx_nxv8i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i32 %b, i32 0
@@ -1313,7 +1313,7 @@
 define @vor_vi_nxv8i32( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vi_nxv8i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i32 5, i32 0
@@ -1341,7 +1341,7 @@
 define @vor_vv_nxv10i32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vv_nxv10i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.or.nxv10i32( %va, %b, %m, i32 %evl)
@@ -1363,7 +1363,7 @@
 define @vor_vx_nxv10i32( %va, i32 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vx_nxv10i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i32 %b, i32 0
@@ -1389,7 +1389,7 @@
 define @vor_vi_nxv10i32( %va, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vi_nxv10i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT: ret
 %elt.head = insertelement poison, i32 5, i32 0
@@ -1417,7 +1417,7 @@
 define @vor_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vv_nxv16i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.or.nxv16i32( %va, %b, %m, i32 %evl)
@@ -1439,7 +1439,7 @@
 define @vor_vx_nxv16i32( %va, i32 %b, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vor_vx_nxv16i32:
CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1465,7 +1465,7 @@ define @vor_vi_nxv16i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 5, i32 0 @@ -1493,7 +1493,7 @@ define @vor_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.or.nxv1i64( %va, %b, %m, i32 %evl) @@ -1522,14 +1522,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vor.vv v8, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vor_vx_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vor.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1569,7 +1569,7 @@ define @vor_vi_nxv1i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 5, i32 0 @@ -1597,7 +1597,7 @@ define @vor_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.or.nxv2i64( %va, %b, %m, i32 %evl) @@ -1626,14 +1626,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vor.vv v8, v8, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vor_vx_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vor.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1673,7 +1673,7 @@ define @vor_vi_nxv2i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 5, i32 0 @@ -1701,7 +1701,7 @@ define @vor_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.or.nxv4i64( %va, %b, %m, i32 %evl) @@ -1730,14 +1730,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, 
ta, ma ; RV32-NEXT: vor.vv v8, v8, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vor_vx_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vor.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1777,7 +1777,7 @@ define @vor_vi_nxv4i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 5, i32 0 @@ -1805,7 +1805,7 @@ define @vor_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.or.nxv8i64( %va, %b, %m, i32 %evl) @@ -1834,14 +1834,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vor.vv v8, v8, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vor_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vor.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1881,7 +1881,7 @@ define @vor_vi_nxv8i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 5, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll @@ -9,14 +9,14 @@ define @vpgather_nxv1i8( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv1i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv1i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -29,14 +29,14 @@ define @vpgather_nxv2i8( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv2i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv2i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v10 ; RV64-NEXT: ret @@ -47,7 +47,7 @@ define @vpgather_nxv2i8_sextload_nxv2i16( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv2i8_sextload_nxv2i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; RV32-NEXT: 
vsext.vf2 v8, v9 @@ -55,7 +55,7 @@ ; ; RV64-LABEL: vpgather_nxv2i8_sextload_nxv2i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; RV64-NEXT: vsext.vf2 v8, v10 @@ -68,7 +68,7 @@ define @vpgather_nxv2i8_zextload_nxv2i16( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv2i8_zextload_nxv2i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; RV32-NEXT: vzext.vf2 v8, v9 @@ -76,7 +76,7 @@ ; ; RV64-LABEL: vpgather_nxv2i8_zextload_nxv2i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; RV64-NEXT: vzext.vf2 v8, v10 @@ -89,7 +89,7 @@ define @vpgather_nxv2i8_sextload_nxv2i32( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv2i8_sextload_nxv2i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV32-NEXT: vsext.vf4 v8, v9 @@ -97,7 +97,7 @@ ; ; RV64-LABEL: vpgather_nxv2i8_sextload_nxv2i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV64-NEXT: vsext.vf4 v8, v10 @@ -110,7 +110,7 @@ define @vpgather_nxv2i8_zextload_nxv2i32( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv2i8_zextload_nxv2i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV32-NEXT: vzext.vf4 v8, v9 @@ -118,7 +118,7 @@ ; ; RV64-LABEL: vpgather_nxv2i8_zextload_nxv2i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV64-NEXT: vzext.vf4 v8, v10 @@ -131,7 +131,7 @@ define @vpgather_nxv2i8_sextload_nxv2i64( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv2i8_sextload_nxv2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV32-NEXT: vsext.vf8 v8, v10 @@ -139,7 +139,7 @@ ; ; RV64-LABEL: vpgather_nxv2i8_sextload_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV64-NEXT: vsext.vf8 v8, v10 @@ -152,7 +152,7 @@ define @vpgather_nxv2i8_zextload_nxv2i64( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv2i8_zextload_nxv2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV32-NEXT: vzext.vf8 v8, v10 @@ -160,7 +160,7 @@ ; ; RV64-LABEL: vpgather_nxv2i8_zextload_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e8, 
mf4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV64-NEXT: vzext.vf8 v8, v10 @@ -175,14 +175,14 @@ define @vpgather_nxv4i8( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv4i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv4i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v12 ; RV64-NEXT: ret @@ -215,14 +215,14 @@ define @vpgather_nxv8i8( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv8i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; RV32-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t ; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv8i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; RV64-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t ; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret @@ -235,7 +235,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 -; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; @@ -243,7 +243,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 -; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds i8, i8* %base, %idxs @@ -270,7 +270,7 @@ ; RV32-NEXT: .LBB12_2: ; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma ; RV32-NEXT: vsext.vf4 v24, v10 -; RV32-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; RV32-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; RV32-NEXT: vluxei32.v v18, (a0), v24, v0.t ; RV32-NEXT: bltu a1, a2, .LBB12_4 ; RV32-NEXT: # %bb.3: @@ -278,7 +278,7 @@ ; RV32-NEXT: .LBB12_4: ; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma ; RV32-NEXT: vsext.vf4 v24, v8 -; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; RV32-NEXT: vmv1r.v v0, v12 ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: vmv4r.v v8, v16 @@ -310,7 +310,7 @@ ; RV64-NEXT: vslidedown.vx v0, v13, a6 ; RV64-NEXT: vsetvli t0, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v24, v11 -; RV64-NEXT: vsetvli zero, a7, e8, m1, ta, mu +; RV64-NEXT: vsetvli zero, a7, e8, m1, ta, ma ; RV64-NEXT: vluxei64.v v19, (a0), v24, v0.t ; RV64-NEXT: bltu a1, a5, .LBB12_6 ; RV64-NEXT: # %bb.5: @@ -325,7 +325,7 @@ ; RV64-NEXT: vslidedown.vx v0, v12, a6 ; RV64-NEXT: vsetvli a5, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v24, v9 -; RV64-NEXT: vsetvli zero, a4, e8, m1, ta, mu +; RV64-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; RV64-NEXT: vluxei64.v v17, (a0), v24, v0.t ; RV64-NEXT: bltu a1, a3, .LBB12_10 ; RV64-NEXT: # %bb.9: @@ -333,7 +333,7 @@ ; RV64-NEXT: .LBB12_10: ; RV64-NEXT: vsetvli a4, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v24, v8 -; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; RV64-NEXT: vmv1r.v v0, v12 ; RV64-NEXT: vluxei64.v v16, (a0), v24, v0.t ; RV64-NEXT: bltu a2, a3, .LBB12_12 @@ -342,7 +342,7 @@ ; RV64-NEXT: .LBB12_12: ; 
RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v24, v10 -; RV64-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; RV64-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; RV64-NEXT: vmv1r.v v0, v13 ; RV64-NEXT: vluxei64.v v18, (a0), v24, v0.t ; RV64-NEXT: vmv4r.v v8, v16 @@ -357,14 +357,14 @@ define @vpgather_nxv1i16( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv1i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv1i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -377,14 +377,14 @@ define @vpgather_nxv2i16( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv2i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv2i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v10 ; RV64-NEXT: ret @@ -395,7 +395,7 @@ define @vpgather_nxv2i16_sextload_nxv2i32( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv2i16_sextload_nxv2i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV32-NEXT: vsext.vf2 v8, v9 @@ -403,7 +403,7 @@ ; ; RV64-LABEL: vpgather_nxv2i16_sextload_nxv2i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV64-NEXT: vsext.vf2 v8, v10 @@ -416,7 +416,7 @@ define @vpgather_nxv2i16_zextload_nxv2i32( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv2i16_zextload_nxv2i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV32-NEXT: vzext.vf2 v8, v9 @@ -424,7 +424,7 @@ ; ; RV64-LABEL: vpgather_nxv2i16_zextload_nxv2i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV64-NEXT: vzext.vf2 v8, v10 @@ -437,7 +437,7 @@ define @vpgather_nxv2i16_sextload_nxv2i64( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv2i16_sextload_nxv2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV32-NEXT: vsext.vf4 v8, v10 @@ -445,7 +445,7 @@ ; ; RV64-LABEL: vpgather_nxv2i16_sextload_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV64-NEXT: vsext.vf4 v8, v10 @@ -458,7 +458,7 @@ define @vpgather_nxv2i16_zextload_nxv2i64( %ptrs, %m, i32 zeroext %evl) { ; 
RV32-LABEL: vpgather_nxv2i16_zextload_nxv2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV32-NEXT: vzext.vf4 v8, v10 @@ -466,7 +466,7 @@ ; ; RV64-LABEL: vpgather_nxv2i16_zextload_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV64-NEXT: vzext.vf4 v8, v10 @@ -481,14 +481,14 @@ define @vpgather_nxv4i16( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv4i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv4i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret @@ -521,14 +521,14 @@ define @vpgather_nxv8i16( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t ; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t ; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret @@ -542,7 +542,7 @@ ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vadd.vv v12, v12, v12 -; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; @@ -551,7 +551,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vadd.vv v16, v16, v16 -; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds i16, i16* %base, %idxs @@ -565,7 +565,7 @@ ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vadd.vv v12, v12, v12 -; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; @@ -574,7 +574,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vadd.vv v16, v16, v16 -; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = sext %idxs to @@ -589,7 +589,7 @@ ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf4 v12, v8 ; RV32-NEXT: vadd.vv v12, v12, v12 -; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; @@ -598,7 +598,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf8 v16, v8 ; RV64-NEXT: vadd.vv v16, v16, v16 -; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = zext %idxs to @@ 
-613,7 +613,7 @@ ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v12, v8 ; RV32-NEXT: vadd.vv v12, v12, v12 -; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; @@ -622,7 +622,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vadd.vv v16, v16, v16 -; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds i16, i16* %base, %idxs @@ -635,13 +635,13 @@ define @vpgather_nxv1i32( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv1i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv1i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -654,13 +654,13 @@ define @vpgather_nxv2i32( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv2i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv2i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret @@ -671,7 +671,7 @@ define @vpgather_nxv2i32_sextload_nxv2i64( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv2i32_sextload_nxv2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV32-NEXT: vsext.vf2 v8, v10 @@ -679,7 +679,7 @@ ; ; RV64-LABEL: vpgather_nxv2i32_sextload_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV64-NEXT: vsext.vf2 v8, v10 @@ -692,7 +692,7 @@ define @vpgather_nxv2i32_zextload_nxv2i64( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv2i32_zextload_nxv2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV32-NEXT: vzext.vf2 v8, v10 @@ -700,7 +700,7 @@ ; ; RV64-LABEL: vpgather_nxv2i32_zextload_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV64-NEXT: vzext.vf2 v8, v10 @@ -715,13 +715,13 @@ define @vpgather_nxv4i32( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv4i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv4i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: 
vmv.v.v v8, v12 ; RV64-NEXT: ret @@ -753,13 +753,13 @@ define @vpgather_nxv8i32( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t ; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret @@ -773,7 +773,7 @@ ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t ; RV32-NEXT: ret ; @@ -782,7 +782,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, %idxs @@ -796,7 +796,7 @@ ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t ; RV32-NEXT: ret ; @@ -805,7 +805,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = sext %idxs to @@ -820,7 +820,7 @@ ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t ; RV32-NEXT: ret ; @@ -829,7 +829,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = zext %idxs to @@ -844,7 +844,7 @@ ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t ; RV32-NEXT: ret ; @@ -853,7 +853,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, %idxs @@ -867,7 +867,7 @@ ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t ; RV32-NEXT: ret ; @@ -876,7 +876,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV64-NEXT: vluxei64.v 
v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = sext %idxs to @@ -891,7 +891,7 @@ ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t ; RV32-NEXT: ret ; @@ -900,7 +900,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = zext %idxs to @@ -914,7 +914,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v8, v8, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t ; RV32-NEXT: ret ; @@ -923,7 +923,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, %idxs @@ -936,14 +936,14 @@ define @vpgather_nxv1i64( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv1i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv.v.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv1i64.nxv1p0i64( %ptrs, %m, i32 %evl) @@ -955,14 +955,14 @@ define @vpgather_nxv2i64( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv2i64.nxv2p0i64( %ptrs, %m, i32 %evl) @@ -974,14 +974,14 @@ define @vpgather_nxv4i64( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv4i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t ; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv4i64.nxv4p0i64( %ptrs, %m, i32 %evl) @@ -1012,14 +1012,14 @@ define @vpgather_nxv8i64( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vluxei32.v v16, (zero), v8, v0.t ; RV32-NEXT: vmv.v.v v8, v16 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t 
; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv8i64.nxv8p0i64( %ptrs, %m, i32 %evl) @@ -1032,7 +1032,7 @@ ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v16, v12, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; @@ -1041,7 +1041,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, %idxs @@ -1055,7 +1055,7 @@ ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v16, v12, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; @@ -1064,7 +1064,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t ; RV64-NEXT: ret %eidxs = sext %idxs to @@ -1079,7 +1079,7 @@ ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v16, v12, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; @@ -1088,7 +1088,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t ; RV64-NEXT: ret %eidxs = zext %idxs to @@ -1103,7 +1103,7 @@ ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v16, v12, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; @@ -1112,7 +1112,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, %idxs @@ -1126,7 +1126,7 @@ ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v16, v12, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; @@ -1135,7 +1135,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t ; RV64-NEXT: ret %eidxs = sext %idxs to @@ -1150,7 +1150,7 @@ ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v16, v12, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; @@ -1159,7 +1159,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf4 v16, v8 ; 
RV64-NEXT: vsll.vi v8, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t ; RV64-NEXT: ret %eidxs = zext %idxs to @@ -1173,7 +1173,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v16, v8, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; @@ -1182,7 +1182,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, %idxs @@ -1195,7 +1195,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v16, v8, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; @@ -1204,7 +1204,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t ; RV64-NEXT: ret %eidxs = sext %idxs to @@ -1218,7 +1218,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v16, v8, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; @@ -1227,7 +1227,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf2 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t ; RV64-NEXT: ret %eidxs = zext %idxs to @@ -1242,7 +1242,7 @@ ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vnsrl.wi v16, v8, 0 ; RV32-NEXT: vsll.vi v16, v16, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; @@ -1250,7 +1250,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsll.vi v8, v8, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, %idxs @@ -1263,14 +1263,14 @@ define @vpgather_nxv1f16( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv1f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv1f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -1283,14 +1283,14 @@ define @vpgather_nxv2f16( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv2f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv2f16: ; RV64: # %bb.0: -; 
RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v10 ; RV64-NEXT: ret @@ -1303,14 +1303,14 @@ define @vpgather_nxv4f16( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv4f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv4f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret @@ -1343,14 +1343,14 @@ define @vpgather_nxv8f16( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t ; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv8f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t ; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret @@ -1364,7 +1364,7 @@ ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vadd.vv v12, v12, v12 -; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; @@ -1373,7 +1373,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vadd.vv v16, v16, v16 -; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds half, half* %base, %idxs @@ -1387,7 +1387,7 @@ ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vadd.vv v12, v12, v12 -; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; @@ -1396,7 +1396,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vadd.vv v16, v16, v16 -; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = sext %idxs to @@ -1411,7 +1411,7 @@ ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf4 v12, v8 ; RV32-NEXT: vadd.vv v12, v12, v12 -; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; @@ -1420,7 +1420,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf8 v16, v8 ; RV64-NEXT: vadd.vv v16, v16, v16 -; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = zext %idxs to @@ -1435,7 +1435,7 @@ ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v12, v8 ; RV32-NEXT: vadd.vv v12, v12, v12 -; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; @@ -1444,7 +1444,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v8 
; RV64-NEXT: vadd.vv v16, v16, v16 -; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds half, half* %base, %idxs @@ -1457,13 +1457,13 @@ define @vpgather_nxv1f32( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv1f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv1f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -1476,13 +1476,13 @@ define @vpgather_nxv2f32( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv2f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv2f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret @@ -1495,13 +1495,13 @@ define @vpgather_nxv4f32( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv4f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv4f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret @@ -1533,13 +1533,13 @@ define @vpgather_nxv8f32( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t ; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret @@ -1553,7 +1553,7 @@ ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t ; RV32-NEXT: ret ; @@ -1562,7 +1562,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, %idxs @@ -1576,7 +1576,7 @@ ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t ; RV32-NEXT: ret ; @@ -1585,7 +1585,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v16, 
v0.t ; RV64-NEXT: ret %eidxs = sext %idxs to @@ -1600,7 +1600,7 @@ ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t ; RV32-NEXT: ret ; @@ -1609,7 +1609,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = zext %idxs to @@ -1624,7 +1624,7 @@ ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t ; RV32-NEXT: ret ; @@ -1633,7 +1633,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, %idxs @@ -1647,7 +1647,7 @@ ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t ; RV32-NEXT: ret ; @@ -1656,7 +1656,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = sext %idxs to @@ -1671,7 +1671,7 @@ ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t ; RV32-NEXT: ret ; @@ -1680,7 +1680,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = zext %idxs to @@ -1694,7 +1694,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v8, v8, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t ; RV32-NEXT: ret ; @@ -1703,7 +1703,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, %idxs @@ -1716,14 +1716,14 @@ define @vpgather_nxv1f64( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv1f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv.v.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv1f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; RV64-NEXT: vsetvli 
zero, a0, e64, m1, ta, ma ; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv1f64.nxv1p0f64( %ptrs, %m, i32 %evl) @@ -1735,14 +1735,14 @@ define @vpgather_nxv2f64( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv2f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv2f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv2f64.nxv2p0f64( %ptrs, %m, i32 %evl) @@ -1754,14 +1754,14 @@ define @vpgather_nxv4f64( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv4f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t ; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv4f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv4f64.nxv4p0f64( %ptrs, %m, i32 %evl) @@ -1792,14 +1792,14 @@ define @vpgather_nxv6f64( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv6f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vluxei32.v v16, (zero), v8, v0.t ; RV32-NEXT: vmv.v.v v8, v16 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv6f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv6f64.nxv6p0f64( %ptrs, %m, i32 %evl) @@ -1812,7 +1812,7 @@ ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v16, v12, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; @@ -1821,7 +1821,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs @@ -1835,7 +1835,7 @@ ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v16, v12, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; @@ -1844,7 +1844,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t ; RV64-NEXT: ret %eidxs = sext %idxs to @@ -1859,7 +1859,7 @@ ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v16, v12, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; @@ -1868,7 +1868,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: 
vzext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t ; RV64-NEXT: ret %eidxs = zext %idxs to @@ -1883,7 +1883,7 @@ ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v16, v12, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; @@ -1892,7 +1892,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs @@ -1906,7 +1906,7 @@ ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v16, v12, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; @@ -1915,7 +1915,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t ; RV64-NEXT: ret %eidxs = sext %idxs to @@ -1930,7 +1930,7 @@ ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v16, v12, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; @@ -1939,7 +1939,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t ; RV64-NEXT: ret %eidxs = zext %idxs to @@ -1953,7 +1953,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v16, v8, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; @@ -1962,7 +1962,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs @@ -1975,7 +1975,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v16, v8, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; @@ -1984,7 +1984,7 @@ ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t ; RV64-NEXT: ret %eidxs = sext %idxs to @@ -1998,7 +1998,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v16, v8, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; 
RV32-NEXT: ret
;
@@ -2007,7 +2007,7 @@
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vzext.vf2 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = zext %idxs to
@@ -2022,7 +2022,7 @@
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vsll.vi v16, v16, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
@@ -2030,7 +2030,7 @@
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsll.vi v8, v8, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, double* %base, %idxs
@@ -2043,14 +2043,14 @@
define @vpgather_nxv8f64( %ptrs, %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv8f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_nxv8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t
; RV64-NEXT: ret
%v = call @llvm.vp.gather.nxv8f64.nxv8p0f64( %ptrs, %m, i32 %evl)
@@ -2063,7 +2063,7 @@
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vsll.vi v16, v12, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
@@ -2072,7 +2072,7 @@
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, double* %base, %idxs
@@ -2086,7 +2086,7 @@
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vsll.vi v16, v12, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
@@ -2095,7 +2095,7 @@
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = sext %idxs to
@@ -2110,7 +2110,7 @@
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vzext.vf4 v12, v8
; RV32-NEXT: vsll.vi v16, v12, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
@@ -2119,7 +2119,7 @@
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vzext.vf8 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = zext %idxs to
@@ -2134,7 +2134,7 @@
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf2 v12, v8
; RV32-NEXT: vsll.vi v16, v12, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
@@ -2143,7 +2143,7 @@
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, double* %base, %idxs
@@ -2157,7 +2157,7 @@
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf2 v12, v8
; RV32-NEXT: vsll.vi v16, v12, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
@@ -2166,7 +2166,7 @@
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = sext %idxs to
@@ -2181,7 +2181,7 @@
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vzext.vf2 v12, v8
; RV32-NEXT: vsll.vi v16, v12, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
@@ -2190,7 +2190,7 @@
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vzext.vf4 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = zext %idxs to
@@ -2204,7 +2204,7 @@
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsll.vi v16, v8, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
@@ -2213,7 +2213,7 @@
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf2 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, double* %base, %idxs
@@ -2226,7 +2226,7 @@
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsll.vi v16, v8, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
@@ -2235,7 +2235,7 @@
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf2 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = sext %idxs to
@@ -2249,7 +2249,7 @@
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsll.vi v16, v8, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
@@ -2258,7 +2258,7 @@
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vzext.vf2 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = zext %idxs to
@@ -2273,7 +2273,7 @@
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vsll.vi v16, v16, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
@@ -2281,7 +2281,7 @@
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsll.vi v8, v8, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, double* %base, %idxs
@@ -2305,13 +2305,13 @@
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a2, a3
; RV32-NEXT: .LBB102_2:
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (zero), v12, v0.t
; RV32-NEXT: bltu a0, a1, .LBB102_4
; RV32-NEXT: # %bb.3:
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB102_4:
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v24
; RV32-NEXT: vluxei32.v v24, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v24
@@ -2330,13 +2330,13 @@
; RV64-NEXT: # %bb.1:
; RV64-NEXT: mv a2, a3
; RV64-NEXT: .LBB102_2:
-; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (zero), v16, v0.t
; RV64-NEXT: bltu a0, a1, .LBB102_4
; RV64-NEXT: # %bb.3:
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB102_4:
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t
; RV64-NEXT: ret
@@ -2361,13 +2361,13 @@
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a3, a4
; RV32-NEXT: .LBB103_2:
-; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v28, v0.t
; RV32-NEXT: bltu a1, a2, .LBB103_4
; RV32-NEXT: # %bb.3:
; RV32-NEXT: mv a1, a2
; RV32-NEXT: .LBB103_4:
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v12
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -2391,13 +2391,13 @@
; RV64-NEXT: .LBB103_2:
; RV64-NEXT: vsetvli a4, zero, e64, m8, ta, ma
; RV64-NEXT: vsll.vi v24, v24, 3
-; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
; RV64-NEXT: bltu a1, a2, .LBB103_4
; RV64-NEXT: # %bb.3:
; RV64-NEXT: mv a1, a2
; RV64-NEXT: .LBB103_4:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
@@ -2423,13 +2423,13 @@
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a3, a4
; RV32-NEXT: .LBB104_2:
-; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v28, v0.t
; RV32-NEXT: bltu a1, a2, .LBB104_4
; RV32-NEXT: # %bb.3:
; RV32-NEXT: mv a1, a2
; RV32-NEXT: .LBB104_4:
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v12
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -2453,13 +2453,13 @@
; RV64-NEXT: .LBB104_2:
; RV64-NEXT: vsetvli a4, zero, e64, m8, ta, ma
; RV64-NEXT: vsll.vi v24, v24, 3
-; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
; RV64-NEXT: bltu a1, a2, .LBB104_4
; RV64-NEXT: # %bb.3:
; RV64-NEXT: mv a1, a2
; RV64-NEXT: .LBB104_4:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
@@ -2486,13 +2486,13 @@
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a3, a4
; RV32-NEXT: .LBB105_2:
-; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v28, v0.t
; RV32-NEXT: bltu a1, a2, .LBB105_4
; RV32-NEXT: # %bb.3:
; RV32-NEXT: mv a1, a2
; RV32-NEXT: .LBB105_4:
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v12
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -2516,13 +2516,13 @@
; RV64-NEXT: .LBB105_2:
; RV64-NEXT: vsetvli a4, zero, e64, m8, ta, ma
; RV64-NEXT: vsll.vi v24, v24, 3
-; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
; RV64-NEXT: bltu a1, a2, .LBB105_4
; RV64-NEXT: # %bb.3:
; RV64-NEXT: mv a1, a2
; RV64-NEXT: .LBB105_4:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpload.ll b/llvm/test/CodeGen/RISCV/rvv/vpload.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpload.ll
@@ -9,7 +9,7 @@
define @vpload_nxv1i8(* %ptr, %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call @llvm.vp.load.nxv1i8.p0nxv1i8(* %ptr, %m, i32 %evl)
@@ -33,7 +33,7 @@
define @vpload_nxv2i8(* %ptr, %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call @llvm.vp.load.nxv2i8.p0nxv2i8(* %ptr, %m, i32 %evl)
@@ -45,7 +45,7 @@
define @vpload_nxv3i8(* %ptr, %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv3i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call @llvm.vp.load.nxv3i8.p0nxv3i8(* %ptr, %m, i32 %evl)
@@ -57,7 +57,7 @@
define @vpload_nxv4i8(* %ptr, %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call @llvm.vp.load.nxv4i8.p0nxv4i8(* %ptr, %m, i32 %evl)
@@ -69,7 +69,7 @@
define @vpload_nxv8i8(* %ptr, %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call @llvm.vp.load.nxv8i8.p0nxv8i8(* %ptr, %m, i32 %evl)
@@ -93,7 +93,7 @@
define @vpload_nxv1i16(* %ptr, %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call @llvm.vp.load.nxv1i16.p0nxv1i16(* %ptr, %m, i32 %evl)
@@ -105,7 +105,7 @@
define @vpload_nxv2i16(* %ptr, %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call @llvm.vp.load.nxv2i16.p0nxv2i16(* %ptr, %m, i32 %evl)
@@ -129,7 +129,7 @@
define @vpload_nxv4i16(* %ptr, %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call @llvm.vp.load.nxv4i16.p0nxv4i16(* %ptr, %m, i32 %evl)
@@ -141,7 +141,7 @@
define @vpload_nxv8i16(* %ptr, %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call @llvm.vp.load.nxv8i16.p0nxv8i16(* %ptr, %m, i32 %evl)
@@ -153,7 +153,7 @@
define @vpload_nxv1i32(* %ptr, %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call @llvm.vp.load.nxv1i32.p0nxv1i32(* %ptr, %m, i32 %evl)
@@ -165,7 +165,7 @@
define @vpload_nxv2i32(* %ptr, %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call @llvm.vp.load.nxv2i32.p0nxv2i32(* %ptr, %m, i32 %evl)
@@ -177,7 +177,7 @@
define @vpload_nxv4i32(* %ptr, %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call @llvm.vp.load.nxv4i32.p0nxv4i32(* %ptr, %m, i32 %evl)
@@ -201,7 +201,7 @@
define @vpload_nxv8i32(* %ptr, %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call @llvm.vp.load.nxv8i32.p0nxv8i32(* %ptr, %m, i32 %evl)
@@ -213,7 +213,7 @@
define @vpload_nxv1i64(* %ptr, %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call @llvm.vp.load.nxv1i64.p0nxv1i64(* %ptr, %m, i32 %evl)
@@ -237,7 +237,7 @@
define @vpload_nxv2i64(* %ptr, %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call @llvm.vp.load.nxv2i64.p0nxv2i64(* %ptr, %m, i32 %evl)
@@ -249,7 +249,7 @@
define @vpload_nxv4i64(* %ptr, %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call @llvm.vp.load.nxv4i64.p0nxv4i64(* %ptr, %m, i32 %evl)
@@ -261,7 +261,7 @@
define @vpload_nxv8i64(* %ptr, %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call @llvm.vp.load.nxv8i64.p0nxv8i64(* %ptr, %m, i32 %evl)
@@ -273,7 +273,7 @@
define @vpload_nxv1f16(* %ptr, %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv1f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call @llvm.vp.load.nxv1f16.p0nxv1f16(* %ptr, %m, i32 %evl)
@@ -285,7 +285,7 @@
define @vpload_nxv2f16(* %ptr, %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call @llvm.vp.load.nxv2f16.p0nxv2f16(* %ptr, %m, i32 %evl)
@@ -309,7 +309,7 @@
define @vpload_nxv4f16(* %ptr, %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call @llvm.vp.load.nxv4f16.p0nxv4f16(* %ptr, %m, i32 %evl)
@@ -321,7 +321,7 @@
define @vpload_nxv8f16(* %ptr, %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call @llvm.vp.load.nxv8f16.p0nxv8f16(* %ptr, %m, i32 %evl)
@@ -333,7 +333,7 @@
define @vpload_nxv1f32(* %ptr, %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv1f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call @llvm.vp.load.nxv1f32.p0nxv1f32(* %ptr, %m, i32 %evl)
@@ -345,7 +345,7 @@
define @vpload_nxv2f32(* %ptr, %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call @llvm.vp.load.nxv2f32.p0nxv2f32(* %ptr, %m, i32 %evl)
@@ -357,7 +357,7 @@
define @vpload_nxv4f32(* %ptr, %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call @llvm.vp.load.nxv4f32.p0nxv4f32(* %ptr, %m, i32 %evl)
@@ -369,7 +369,7 @@
define @vpload_nxv8f32(* %ptr, %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call @llvm.vp.load.nxv8f32.p0nxv8f32(* %ptr, %m, i32 %evl)
@@ -393,7 +393,7 @@
define @vpload_nxv1f64(* %ptr, %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv1f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call @llvm.vp.load.nxv1f64.p0nxv1f64(* %ptr, %m, i32 %evl)
@@ -405,7 +405,7 @@
define @vpload_nxv2f64(* %ptr, %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call @llvm.vp.load.nxv2f64.p0nxv2f64(* %ptr, %m, i32 %evl)
@@ -417,7 +417,7 @@
define @vpload_nxv4f64(* %ptr, %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call @llvm.vp.load.nxv4f64.p0nxv4f64(* %ptr, %m, i32 %evl)
@@ -441,7 +441,7 @@
define @vpload_nxv8f64(* %ptr, %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call @llvm.vp.load.nxv8f64.p0nxv8f64(* %ptr, %m, i32 %evl)
@@ -466,13 +466,13 @@
; CHECK-NEXT: .LBB37_2:
; CHECK-NEXT: slli a4, a2, 3
; CHECK-NEXT: add a4, a0, a4
-; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v16, (a4), v0.t
; CHECK-NEXT: bltu a1, a2, .LBB37_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB37_4:
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
@@ -514,7 +514,7 @@
; CHECK-NEXT: vslidedown.vx v0, v8, t0
; CHECK-NEXT: slli t0, a3, 3
; CHECK-NEXT: add t0, a0, t0
-; CHECK-NEXT: vsetvli zero, a6, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a6, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v16, (t0), v0.t
; CHECK-NEXT: srli a6, a3, 2
; CHECK-NEXT: sub t0, a2, a5
@@ -530,13 +530,13 @@
; CHECK-NEXT: # %bb.7:
; CHECK-NEXT: mv a7, a3
; CHECK-NEXT: .LBB38_8:
-; CHECK-NEXT: vsetvli zero, a7, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a7, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a2), v0.t
; CHECK-NEXT: bltu a4, a3, .LBB38_10
; CHECK-NEXT: # %bb.9:
; CHECK-NEXT: mv a4, a3
; CHECK-NEXT: .LBB38_10:
-; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: vs1r.v v24, (a1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll
@@ -15,7 +15,7 @@
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vadd.vv v9, v9, v9
; CHECK-NEXT: vsra.vi v9, v9, 1
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i7 %b, i32 0
@@ -29,7 +29,7 @@
define @vrem_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.srem.nxv1i8( %va, %b, %m, i32 %evl)
@@ -51,7 +51,7 @@
define @vrem_vx_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 %b, i32 0
@@ -79,7 +79,7 @@
define @vrem_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.srem.nxv2i8( %va, %b, %m, i32 %evl)
@@ -101,7 +101,7 @@
define @vrem_vx_nxv2i8( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 %b, i32 0
@@ -129,7 +129,7 @@
define @vrem_vv_nxv3i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_nxv3i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.srem.nxv3i8( %va, %b, %m, i32 %evl)
@@ -141,7 +141,7 @@
define @vrem_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.srem.nxv4i8( %va, %b, %m, i32 %evl)
@@ -163,7 +163,7 @@
define @vrem_vx_nxv4i8( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 %b, i32 0
@@ -191,7 +191,7 @@
define @vrem_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.srem.nxv8i8( %va, %b, %m, i32 %evl)
@@ -213,7 +213,7 @@
define @vrem_vx_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 %b, i32 0
@@ -241,7 +241,7 @@
define @vrem_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.srem.nxv16i8( %va, %b, %m, i32 %evl)
@@ -263,7 +263,7 @@
define @vrem_vx_nxv16i8( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 %b, i32 0
@@ -291,7 +291,7 @@
define @vrem_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.srem.nxv32i8( %va, %b, %m, i32 %evl)
@@ -313,7 +313,7 @@
define @vrem_vx_nxv32i8( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 %b, i32 0
@@ -341,7 +341,7 @@
define @vrem_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_nxv64i8:
; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv64i8( %va, %b, %m, i32 %evl) @@ -363,7 +363,7 @@ define @vrem_vx_nxv64i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -391,7 +391,7 @@ define @vrem_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv1i16( %va, %b, %m, i32 %evl) @@ -413,7 +413,7 @@ define @vrem_vx_nxv1i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -441,7 +441,7 @@ define @vrem_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv2i16( %va, %b, %m, i32 %evl) @@ -463,7 +463,7 @@ define @vrem_vx_nxv2i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -491,7 +491,7 @@ define @vrem_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv4i16( %va, %b, %m, i32 %evl) @@ -513,7 +513,7 @@ define @vrem_vx_nxv4i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -541,7 +541,7 @@ define @vrem_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv8i16( %va, %b, %m, i32 %evl) @@ -563,7 +563,7 @@ define @vrem_vx_nxv8i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -591,7 +591,7 @@ define @vrem_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = 
call @llvm.vp.srem.nxv16i16( %va, %b, %m, i32 %evl) @@ -613,7 +613,7 @@ define @vrem_vx_nxv16i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -641,7 +641,7 @@ define @vrem_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv32i16( %va, %b, %m, i32 %evl) @@ -663,7 +663,7 @@ define @vrem_vx_nxv32i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -691,7 +691,7 @@ define @vrem_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv1i32( %va, %b, %m, i32 %evl) @@ -713,7 +713,7 @@ define @vrem_vx_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -741,7 +741,7 @@ define @vrem_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv2i32( %va, %b, %m, i32 %evl) @@ -763,7 +763,7 @@ define @vrem_vx_nxv2i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -791,7 +791,7 @@ define @vrem_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv4i32( %va, %b, %m, i32 %evl) @@ -813,7 +813,7 @@ define @vrem_vx_nxv4i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -841,7 +841,7 @@ define @vrem_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv8i32( %va, %b, %m, i32 %evl) @@ -863,7 +863,7 @@ define @vrem_vx_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_nxv8i32: ; 
CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -891,7 +891,7 @@ define @vrem_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv16i32( %va, %b, %m, i32 %evl) @@ -913,7 +913,7 @@ define @vrem_vx_nxv16i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -941,7 +941,7 @@ define @vrem_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv1i64( %va, %b, %m, i32 %evl) @@ -970,14 +970,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vrem.vv v8, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vrem.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1019,7 +1019,7 @@ define @vrem_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv2i64( %va, %b, %m, i32 %evl) @@ -1048,14 +1048,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vrem.vv v8, v8, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vrem.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1097,7 +1097,7 @@ define @vrem_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv4i64( %va, %b, %m, i32 %evl) @@ -1126,14 +1126,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vrem.vv v8, v8, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vrem.vx v8, v8, a0, v0.t 
; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1175,7 +1175,7 @@ define @vrem_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv8i64( %va, %b, %m, i32 %evl) @@ -1204,14 +1204,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vrem.vv v8, v8, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vrem.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll @@ -14,7 +14,7 @@ ; CHECK-NEXT: vand.vx v8, v8, a2 ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vand.vx v9, v9, a2 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i7 %b, i32 0 @@ -28,7 +28,7 @@ define @vremu_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv1i8( %va, %b, %m, i32 %evl) @@ -50,7 +50,7 @@ define @vremu_vx_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -78,7 +78,7 @@ define @vremu_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv2i8( %va, %b, %m, i32 %evl) @@ -100,7 +100,7 @@ define @vremu_vx_nxv2i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -128,7 +128,7 @@ define @vremu_vv_nxv3i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv3i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv3i8( %va, %b, %m, i32 %evl) @@ -140,7 +140,7 @@ define @vremu_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv4i8( %va, %b, %m, i32 %evl) @@ -162,7 +162,7 @@ define @vremu_vx_nxv4i8( %va, i8 %b, %m, 
i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -190,7 +190,7 @@ define @vremu_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv8i8( %va, %b, %m, i32 %evl) @@ -212,7 +212,7 @@ define @vremu_vx_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -240,7 +240,7 @@ define @vremu_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv16i8( %va, %b, %m, i32 %evl) @@ -262,7 +262,7 @@ define @vremu_vx_nxv16i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -290,7 +290,7 @@ define @vremu_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv32i8( %va, %b, %m, i32 %evl) @@ -312,7 +312,7 @@ define @vremu_vx_nxv32i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -340,7 +340,7 @@ define @vremu_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv64i8( %va, %b, %m, i32 %evl) @@ -362,7 +362,7 @@ define @vremu_vx_nxv64i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -390,7 +390,7 @@ define @vremu_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv1i16( %va, %b, %m, i32 %evl) @@ -412,7 +412,7 @@ define @vremu_vx_nxv1i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; 
CHECK-NEXT: vremu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -440,7 +440,7 @@ define @vremu_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv2i16( %va, %b, %m, i32 %evl) @@ -462,7 +462,7 @@ define @vremu_vx_nxv2i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -490,7 +490,7 @@ define @vremu_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv4i16( %va, %b, %m, i32 %evl) @@ -512,7 +512,7 @@ define @vremu_vx_nxv4i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -540,7 +540,7 @@ define @vremu_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv8i16( %va, %b, %m, i32 %evl) @@ -562,7 +562,7 @@ define @vremu_vx_nxv8i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -590,7 +590,7 @@ define @vremu_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv16i16( %va, %b, %m, i32 %evl) @@ -612,7 +612,7 @@ define @vremu_vx_nxv16i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -640,7 +640,7 @@ define @vremu_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv32i16( %va, %b, %m, i32 %evl) @@ -662,7 +662,7 @@ define @vremu_vx_nxv32i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -690,7 +690,7 @@ define 
@vremu_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv1i32( %va, %b, %m, i32 %evl) @@ -712,7 +712,7 @@ define @vremu_vx_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -740,7 +740,7 @@ define @vremu_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv2i32( %va, %b, %m, i32 %evl) @@ -762,7 +762,7 @@ define @vremu_vx_nxv2i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -790,7 +790,7 @@ define @vremu_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv4i32( %va, %b, %m, i32 %evl) @@ -812,7 +812,7 @@ define @vremu_vx_nxv4i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -840,7 +840,7 @@ define @vremu_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv8i32( %va, %b, %m, i32 %evl) @@ -862,7 +862,7 @@ define @vremu_vx_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -890,7 +890,7 @@ define @vremu_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv16i32( %va, %b, %m, i32 %evl) @@ -912,7 +912,7 @@ define @vremu_vx_nxv16i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -940,7 +940,7 @@ define @vremu_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, 
m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv1i64( %va, %b, %m, i32 %evl) @@ -969,14 +969,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vremu.vv v8, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vx_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vremu.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1018,7 +1018,7 @@ define @vremu_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv2i64( %va, %b, %m, i32 %evl) @@ -1047,14 +1047,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vremu.vv v8, v8, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vx_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vremu.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1096,7 +1096,7 @@ define @vremu_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv4i64( %va, %b, %m, i32 %evl) @@ -1125,14 +1125,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vremu.vv v8, v8, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vx_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vremu.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1174,7 +1174,7 @@ define @vremu_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv8i64( %va, %b, %m, i32 %evl) @@ -1203,14 +1203,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vremu.vv v8, v8, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vremu.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll 
b/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll @@ -9,7 +9,7 @@ define @vrsub_vx_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -35,7 +35,7 @@ define @vrsub_vi_nxv1i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 2, i32 0 @@ -63,7 +63,7 @@ define @vrsub_vx_nxv2i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -89,7 +89,7 @@ define @vrsub_vi_nxv2i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 2, i32 0 @@ -117,7 +117,7 @@ define @vrsub_vx_nxv4i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -143,7 +143,7 @@ define @vrsub_vi_nxv4i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 2, i32 0 @@ -171,7 +171,7 @@ define @vrsub_vx_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -197,7 +197,7 @@ define @vrsub_vi_nxv8i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 2, i32 0 @@ -225,7 +225,7 @@ define @vrsub_vx_nxv16i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -251,7 +251,7 @@ define @vrsub_vi_nxv16i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 2, i32 0 @@ -279,7 +279,7 @@ define @vrsub_vx_nxv32i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv32i8: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -305,7 +305,7 @@ define @vrsub_vi_nxv32i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 2, i32 0 @@ -333,7 +333,7 @@ define @vrsub_vx_nxv64i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -359,7 +359,7 @@ define @vrsub_vi_nxv64i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 2, i32 0 @@ -387,7 +387,7 @@ define @vrsub_vx_nxv1i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -413,7 +413,7 @@ define @vrsub_vi_nxv1i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 2, i32 0 @@ -441,7 +441,7 @@ define @vrsub_vx_nxv2i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -467,7 +467,7 @@ define @vrsub_vi_nxv2i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 2, i32 0 @@ -495,7 +495,7 @@ define @vrsub_vx_nxv4i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -521,7 +521,7 @@ define @vrsub_vi_nxv4i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 2, i32 0 @@ -549,7 +549,7 @@ define @vrsub_vx_nxv8i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -575,7 +575,7 @@ define 
; CHECK-LABEL: vrsub_vi_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 2, i32 0
@@ -603,7 +603,7 @@
define @vrsub_vx_nxv16i16( %va, i16 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 %b, i32 0
@@ -629,7 +629,7 @@
define @vrsub_vi_nxv16i16( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 2, i32 0
@@ -657,7 +657,7 @@
define @vrsub_vx_nxv32i16( %va, i16 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 %b, i32 0
@@ -683,7 +683,7 @@
define @vrsub_vi_nxv32i16( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 2, i32 0
@@ -711,7 +711,7 @@
define @vrsub_vx_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 %b, i32 0
@@ -737,7 +737,7 @@
define @vrsub_vi_nxv1i32( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 2, i32 0
@@ -765,7 +765,7 @@
define @vrsub_vx_nxv2i32( %va, i32 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 %b, i32 0
@@ -791,7 +791,7 @@
define @vrsub_vi_nxv2i32( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 2, i32 0
@@ -819,7 +819,7 @@
define @vrsub_vx_nxv4i32( %va, i32 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 %b, i32 0
@@ -845,7 +845,7 @@
define @vrsub_vi_nxv4i32( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 2, i32 0
@@ -873,7 +873,7 @@
define @vrsub_vx_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 %b, i32 0
@@ -899,7 +899,7 @@
define @vrsub_vi_nxv8i32( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 2, i32 0
@@ -927,7 +927,7 @@
define @vrsub_vx_nxv16i32( %va, i32 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 %b, i32 0
@@ -953,7 +953,7 @@
define @vrsub_vi_nxv16i32( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 2, i32 0
@@ -988,14 +988,14 @@
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vsub.vv v8, v9, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vrsub_vx_nxv1i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vrsub.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement poison, i64 %b, i32 0
@@ -1035,7 +1035,7 @@
define @vrsub_vi_nxv1i64( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i64 2, i32 0
@@ -1070,14 +1070,14 @@
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vsub.vv v8, v10, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vrsub_vx_nxv2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vrsub.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement poison, i64 %b, i32 0
@@ -1117,7 +1117,7 @@
define @vrsub_vi_nxv2i64( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i64 2, i32 0
@@ -1152,14 +1152,14 @@
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vsub.vv v8, v12, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vrsub_vx_nxv4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vrsub.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement poison, i64 %b, i32 0
@@ -1199,7 +1199,7 @@
define @vrsub_vi_nxv4i64( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i64 2, i32 0
@@ -1234,14 +1234,14 @@
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vsub.vv v8, v16, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vrsub_vx_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vrsub.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement poison, i64 %b, i32 0
@@ -1281,7 +1281,7 @@
define @vrsub_vi_nxv8i64( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i64 2, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
@@ -820,7 +820,7 @@
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli a0, 6, e32, m2, tu, ma
; CHECK-NEXT: vmv.s.x v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%mask) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll
@@ -7,7 +7,7 @@
define @vsext_nxv2i8_nxv2i16( %a, %m, i32 zeroext %vl) {
; CHECK-LABEL: vsext_nxv2i8_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vsext.vf2 v9, v8, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
@@ -31,7 +31,7 @@
define @vsext_nxv2i8_nxv2i32( %a, %m, i32 zeroext %vl) {
; CHECK-LABEL: vsext_nxv2i8_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vsext.vf4 v9, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
@@ -55,7 +55,7 @@
define @vsext_nxv2i8_nxv2i64( %a, %m, i32 zeroext %vl) {
; CHECK-LABEL: vsext_nxv2i8_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vsext.vf8 v10, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
@@ -79,7 +79,7 @@
define @vsext_nxv2i16_nxv2i32( %a, %m, i32 zeroext %vl) {
; CHECK-LABEL: vsext_nxv2i16_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vsext.vf2 v9, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
@@ -103,7 +103,7 @@
define @vsext_nxv2i16_nxv2i64( %a, %m, i32 zeroext %vl) {
; CHECK-LABEL: vsext_nxv2i16_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vsext.vf4 v10, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
@@ -127,7 +127,7 @@
define @vsext_nxv2i32_nxv2i64( %a, %m, i32 zeroext %vl) {
; CHECK-LABEL: vsext_nxv2i32_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vsext.vf2 v10, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
@@ -163,13 +163,13 @@
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a3
; CHECK-NEXT: .LBB12_2:
-; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT: vsext.vf4 v16, v10, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB12_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB12_4:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsext.vf4 v24, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v24
diff --git a/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll
@@ -13,7 +13,7 @@
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: li a0, 127
; CHECK-NEXT: vand.vx v9, v9, a0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i7 %b, i32 0
@@ -27,7 +27,7 @@
define @vsll_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.shl.nxv1i8( %va, %b, %m, i32 %evl)
@@ -49,7 +49,7 @@
define @vsll_vx_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 %b, i32 0
@@ -75,7 +75,7 @@
define @vsll_vi_nxv1i8( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 3, i32 0
@@ -103,7 +103,7 @@
define @vsll_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.shl.nxv2i8( %va, %b, %m, i32 %evl)
@@ -125,7 +125,7 @@
define @vsll_vx_nxv2i8( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 %b, i32 0
@@ -151,7 +151,7 @@
define @vsll_vi_nxv2i8( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 3, i32 0
@@ -179,7 +179,7 @@
define @vsll_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.shl.nxv4i8( %va, %b, %m, i32 %evl)
@@ -201,7 +201,7 @@
define @vsll_vx_nxv4i8( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 %b, i32 0
@@ -227,7 +227,7 @@
define @vsll_vi_nxv4i8( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 3, i32 0
@@ -255,7 +255,7 @@
define @vsll_vv_nxv5i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv5i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.shl.nxv5i8( %va, %b, %m, i32 %evl)
@@ -267,7 +267,7 @@
define @vsll_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.shl.nxv8i8( %va, %b, %m, i32 %evl)
@@ -289,7 +289,7 @@
define @vsll_vx_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 %b, i32 0
@@ -315,7 +315,7 @@
define @vsll_vi_nxv8i8( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 3, i32 0
@@ -343,7 +343,7 @@
define @vsll_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.shl.nxv16i8( %va, %b, %m, i32 %evl)
@@ -365,7 +365,7 @@
define @vsll_vx_nxv16i8( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 %b, i32 0
@@ -391,7 +391,7 @@
define @vsll_vi_nxv16i8( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 3, i32 0
@@ -419,7 +419,7 @@
define @vsll_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.shl.nxv32i8( %va, %b, %m, i32 %evl)
@@ -441,7 +441,7 @@
define @vsll_vx_nxv32i8( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 %b, i32 0
@@ -467,7 +467,7 @@
define @vsll_vi_nxv32i8( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 3, i32 0
@@ -495,7 +495,7 @@
define @vsll_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.shl.nxv64i8( %va, %b, %m, i32 %evl)
@@ -517,7 +517,7 @@
define @vsll_vx_nxv64i8( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 %b, i32 0
@@ -543,7 +543,7 @@
define @vsll_vi_nxv64i8( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 3, i32 0
@@ -571,7 +571,7 @@
define @vsll_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.shl.nxv1i16( %va, %b, %m, i32 %evl)
@@ -593,7 +593,7 @@
define @vsll_vx_nxv1i16( %va, i16 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 %b, i32 0
@@ -619,7 +619,7 @@
define @vsll_vi_nxv1i16( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 3, i32 0
@@ -647,7 +647,7 @@
define @vsll_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.shl.nxv2i16( %va, %b, %m, i32 %evl)
@@ -669,7 +669,7 @@
define @vsll_vx_nxv2i16( %va, i16 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 %b, i32 0
@@ -695,7 +695,7 @@
define @vsll_vi_nxv2i16( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 3, i32 0
@@ -723,7 +723,7 @@
define @vsll_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.shl.nxv4i16( %va, %b, %m, i32 %evl)
@@ -745,7 +745,7 @@
define @vsll_vx_nxv4i16( %va, i16 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 %b, i32 0
@@ -771,7 +771,7 @@
define @vsll_vi_nxv4i16( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 3, i32 0
@@ -799,7 +799,7 @@
define @vsll_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.shl.nxv8i16( %va, %b, %m, i32 %evl)
@@ -821,7 +821,7 @@
define @vsll_vx_nxv8i16( %va, i16 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 %b, i32 0
@@ -847,7 +847,7 @@
define @vsll_vi_nxv8i16( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 3, i32 0
@@ -875,7 +875,7 @@
define @vsll_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.shl.nxv16i16( %va, %b, %m, i32 %evl)
@@ -897,7 +897,7 @@
define @vsll_vx_nxv16i16( %va, i16 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 %b, i32 0
@@ -923,7 +923,7 @@
define @vsll_vi_nxv16i16( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 3, i32 0
@@ -951,7 +951,7 @@
define @vsll_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.shl.nxv32i16( %va, %b, %m, i32 %evl)
@@ -973,7 +973,7 @@
define @vsll_vx_nxv32i16( %va, i16 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 %b, i32 0
@@ -999,7 +999,7 @@
define @vsll_vi_nxv32i16( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 3, i32 0
@@ -1027,7 +1027,7 @@
define @vsll_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.shl.nxv1i32( %va, %b, %m, i32 %evl)
@@ -1049,7 +1049,7 @@
define @vsll_vx_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 %b, i32 0
@@ -1075,7 +1075,7 @@
define @vsll_vi_nxv1i32( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 3, i32 0
@@ -1103,7 +1103,7 @@
define @vsll_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.shl.nxv2i32( %va, %b, %m, i32 %evl)
@@ -1125,7 +1125,7 @@
define @vsll_vx_nxv2i32( %va, i32 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 %b, i32 0
@@ -1151,7 +1151,7 @@
define @vsll_vi_nxv2i32( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 3, i32 0
@@ -1179,7 +1179,7 @@
define @vsll_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.shl.nxv4i32( %va, %b, %m, i32 %evl)
@@ -1201,7 +1201,7 @@
define @vsll_vx_nxv4i32( %va, i32 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 %b, i32 0
@@ -1227,7 +1227,7 @@
define @vsll_vi_nxv4i32( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 3, i32 0
@@ -1255,7 +1255,7 @@
define @vsll_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.shl.nxv8i32( %va, %b, %m, i32 %evl)
@@ -1277,7 +1277,7 @@
define @vsll_vx_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 %b, i32 0
@@ -1303,7 +1303,7 @@
define @vsll_vi_nxv8i32( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 3, i32 0
@@ -1331,7 +1331,7 @@
define @vsll_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.shl.nxv16i32( %va, %b, %m, i32 %evl)
@@ -1353,7 +1353,7 @@
define @vsll_vx_nxv16i32( %va, i32 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 %b, i32 0
@@ -1379,7 +1379,7 @@
define @vsll_vi_nxv16i32( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 3, i32 0
@@ -1407,7 +1407,7 @@
define @vsll_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.shl.nxv1i64( %va, %b, %m, i32 %evl)
@@ -1429,13 +1429,13 @@
define @vsll_vx_nxv1i64( %va, i64 %b, %m, i32 zeroext %evl) {
; RV32-LABEL: vsll_vx_nxv1i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vsll.vx v8, v8, a0, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vsll_vx_nxv1i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vsll.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement poison, i64 %b, i32 0
@@ -1467,7 +1467,7 @@
define @vsll_vi_nxv1i64( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i64 3, i32 0
@@ -1495,7 +1495,7 @@
define @vsll_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.shl.nxv2i64( %va, %b, %m, i32 %evl)
@@ -1517,13 +1517,13 @@
define @vsll_vx_nxv2i64( %va, i64 %b, %m, i32 zeroext %evl) {
; RV32-LABEL: vsll_vx_nxv2i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vsll.vx v8, v8, a0, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vsll_vx_nxv2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vsll.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement poison, i64 %b, i32 0
@@ -1555,7 +1555,7 @@
define @vsll_vi_nxv2i64( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i64 3, i32 0
@@ -1583,7 +1583,7 @@
define @vsll_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.shl.nxv4i64( %va, %b, %m, i32 %evl)
@@ -1605,13 +1605,13 @@
define @vsll_vx_nxv4i64( %va, i64 %b, %m, i32 zeroext %evl) {
; RV32-LABEL: vsll_vx_nxv4i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vsll.vx v8, v8, a0, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vsll_vx_nxv4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vsll.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement poison, i64 %b, i32 0
@@ -1643,7 +1643,7 @@
define @vsll_vi_nxv4i64( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i64 3, i32 0
@@ -1671,7 +1671,7 @@
define @vsll_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.shl.nxv8i64( %va, %b, %m, i32 %evl)
@@ -1693,13 +1693,13 @@
define @vsll_vx_nxv8i64( %va, i64 %b, %m, i32 zeroext %evl) {
; RV32-LABEL: vsll_vx_nxv8i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vsll.vx v8, v8, a0, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vsll_vx_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vsll.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement poison, i64 %b, i32 0
@@ -1731,7 +1731,7 @@
define @vsll_vi_nxv8i64( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i64 3, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp-mask.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp-mask.ll
@@ -7,7 +7,7 @@
define @vsitofp_nxv2f16_nxv2i1( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsitofp_nxv2f16_nxv2i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, -1, v0
; CHECK-NEXT: vmv1r.v v0, v8
@@ -34,7 +34,7 @@
define @vsitofp_nxv2f32_nxv2i1( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsitofp_nxv2f32_nxv2i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, -1, v0
; CHECK-NEXT: vmv1r.v v0, v8
@@ -61,7 +61,7 @@
define @vsitofp_nxv2f64_nxv2i1( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsitofp_nxv2f64_nxv2i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vmerge.vim v10, v10, -1, v0
; CHECK-NEXT: vmv1r.v v0, v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
@@ -10,7 +10,7 @@
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v8
; CHECK-NEXT: vsra.vi v9, v8, 1
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.sitofp.nxv2f16.nxv2i7( %va, %m, i32 %evl)
@@ -22,7 +22,7 @@
define @vsitofp_nxv2f16_nxv2i8( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsitofp_nxv2f16_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vfwcvt.f.x.v v9, v8, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
@@ -46,7 +46,7 @@
define @vsitofp_nxv2f16_nxv2i16( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsitofp_nxv2f16_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.sitofp.nxv2f16.nxv2i16( %va, %m, i32 %evl)
@@ -68,7 +68,7 @@
define @vsitofp_nxv2f16_nxv2i32( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsitofp_nxv2f16_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfncvt.f.x.w v9, v8, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
@@ -92,9 +92,9 @@
define @vsitofp_nxv2f16_nxv2i64( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsitofp_nxv2f16_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfncvt.f.x.w v10, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT: vfncvt.f.f.w v8, v10, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.sitofp.nxv2f16.nxv2i64( %va, %m, i32 %evl)
@@ -118,7 +118,7 @@
define @vsitofp_nxv2f32_nxv2i8( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsitofp_nxv2f32_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vsext.vf2 v9, v8, v0.t
; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t
; CHECK-NEXT: ret
@@ -142,7 +142,7 @@
define @vsitofp_nxv2f32_nxv2i16( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsitofp_nxv2f32_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfwcvt.f.x.v v9, v8, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
@@ -166,7 +166,7 @@
define @vsitofp_nxv2f32_nxv2i32( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsitofp_nxv2f32_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.sitofp.nxv2f32.nxv2i32( %va, %m, i32 %evl)
@@ -188,7 +188,7 @@
define @vsitofp_nxv2f32_nxv2i64( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsitofp_nxv2f32_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfncvt.f.x.w v10, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
@@ -212,7 +212,7 @@
define @vsitofp_nxv2f64_nxv2i8( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsitofp_nxv2f64_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vsext.vf4 v10, v8, v0.t
; CHECK-NEXT: vfwcvt.f.x.v v8, v10, v0.t
; CHECK-NEXT: ret
@@ -236,7 +236,7 @@
define @vsitofp_nxv2f64_nxv2i16( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsitofp_nxv2f64_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vsext.vf2 v10, v8, v0.t
; CHECK-NEXT: vfwcvt.f.x.v v8, v10, v0.t
; CHECK-NEXT: ret
@@ -260,7 +260,7 @@
define @vsitofp_nxv2f64_nxv2i32( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsitofp_nxv2f64_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfwcvt.f.x.v v10, v8, v0.t
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
@@ -284,7 +284,7 @@
define @vsitofp_nxv2f64_nxv2i64( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsitofp_nxv2f64_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.sitofp.nxv2f64.nxv2i64( %va, %m, i32 %evl)
@@ -325,13 +325,13 @@
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a3
; CHECK-NEXT: .LBB25_2:
-; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT: vfncvt.f.x.w v12, v16, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB25_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB25_4:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
@@ -362,13 +362,13 @@
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a3
; CHECK-NEXT: .LBB26_2:
-; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB26_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB26_4:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll
@@ -15,7 +15,7 @@
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: li a0, 127
; CHECK-NEXT: vand.vx v9, v9, a0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i7 %b, i32 0
@@ -29,7 +29,7 @@
define @vsra_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.ashr.nxv1i8( %va, %b, %m, i32 %evl)
@@ -51,7 +51,7 @@
define @vsra_vx_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 %b, i32 0
@@ -77,7 +77,7 @@
define @vsra_vi_nxv1i8( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 5, i32 0
@@ -105,7 +105,7 @@
define @vsra_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.ashr.nxv2i8( %va, %b, %m, i32 %evl)
@@ -127,7 +127,7 @@
define @vsra_vx_nxv2i8( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 %b, i32 0
@@ -153,7 +153,7 @@
define @vsra_vi_nxv2i8( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 5, i32 0
@@ -181,7 +181,7 @@
define @vsra_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.ashr.nxv4i8( %va, %b, %m, i32 %evl)
@@ -203,7 +203,7 @@
define @vsra_vx_nxv4i8( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 %b, i32 0
@@ -229,7 +229,7 @@
define @vsra_vi_nxv4i8( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 5, i32 0
@@ -257,7 +257,7 @@
define @vsra_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.ashr.nxv8i8( %va, %b, %m, i32 %evl)
@@ -279,7 +279,7 @@
define @vsra_vx_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 %b, i32 0
@@ -305,7 +305,7 @@
define @vsra_vi_nxv8i8( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 5, i32 0
@@ -333,7 +333,7 @@
define @vsra_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.ashr.nxv16i8( %va, %b, %m, i32 %evl)
@@ -355,7 +355,7 @@
define @vsra_vx_nxv16i8( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 %b, i32 0
@@ -381,7 +381,7 @@
define @vsra_vi_nxv16i8( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 5, i32 0
@@ -409,7 +409,7 @@
define @vsra_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.ashr.nxv32i8( %va, %b, %m, i32 %evl)
@@ -431,7 +431,7 @@
define @vsra_vx_nxv32i8( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 %b, i32 0
@@ -457,7 +457,7 @@
define @vsra_vi_nxv32i8( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 5, i32 0
@@ -485,7 +485,7 @@
define @vsra_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.ashr.nxv64i8( %va, %b, %m, i32 %evl)
@@ -507,7 +507,7 @@
define @vsra_vx_nxv64i8( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 %b, i32 0
@@ -533,7 +533,7 @@
define @vsra_vi_nxv64i8( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i8 5, i32 0
@@ -561,7 +561,7 @@
define @vsra_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.ashr.nxv1i16( %va, %b, %m, i32 %evl)
@@ -583,7 +583,7 @@
define @vsra_vx_nxv1i16( %va, i16 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 %b, i32 0
@@ -609,7 +609,7 @@
define @vsra_vi_nxv1i16( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 5, i32 0
@@ -637,7 +637,7 @@
define @vsra_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.ashr.nxv2i16( %va, %b, %m, i32 %evl)
@@ -659,7 +659,7 @@
define @vsra_vx_nxv2i16( %va, i16 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 %b, i32 0
@@ -685,7 +685,7 @@
define @vsra_vi_nxv2i16( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 5, i32 0
@@ -713,7 +713,7 @@
define @vsra_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.ashr.nxv4i16( %va, %b, %m, i32 %evl)
@@ -735,7 +735,7 @@
define @vsra_vx_nxv4i16( %va, i16 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 %b, i32 0
@@ -761,7 +761,7 @@
define @vsra_vi_nxv4i16( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 5, i32 0
@@ -789,7 +789,7 @@
define @vsra_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.ashr.nxv8i16( %va, %b, %m, i32 %evl)
@@ -811,7 +811,7 @@
define @vsra_vx_nxv8i16( %va, i16 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 %b, i32 0
@@ -837,7 +837,7 @@
define @vsra_vi_nxv8i16( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 5, i32 0
@@ -865,7 +865,7 @@
define @vsra_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.ashr.nxv16i16( %va, %b, %m, i32 %evl)
@@ -887,7 +887,7 @@
define @vsra_vx_nxv16i16( %va, i16 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 %b, i32 0
@@ -913,7 +913,7 @@
define @vsra_vi_nxv16i16( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 5, i32 0
@@ -941,7 +941,7 @@
define @vsra_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.ashr.nxv32i16( %va, %b, %m, i32 %evl)
@@ -963,7 +963,7 @@
define @vsra_vx_nxv32i16( %va, i16 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 %b, i32 0
@@ -989,7 +989,7 @@
define @vsra_vi_nxv32i16( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i16 5, i32 0
@@ -1017,7 +1017,7 @@
define @vsra_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.ashr.nxv1i32( %va, %b, %m, i32 %evl)
@@ -1039,7 +1039,7 @@
define @vsra_vx_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 %b, i32 0
@@ -1065,7 +1065,7 @@
define @vsra_vi_nxv1i32( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 5, i32 0
@@ -1093,7 +1093,7 @@
define @vsra_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.ashr.nxv2i32( %va, %b, %m, i32 %evl)
@@ -1115,7 +1115,7 @@
define @vsra_vx_nxv2i32( %va, i32 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 %b, i32 0
@@ -1141,7 +1141,7 @@
define @vsra_vi_nxv2i32( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 5, i32 0
@@ -1169,7 +1169,7 @@
define @vsra_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.ashr.nxv4i32( %va, %b, %m, i32 %evl)
@@ -1191,7 +1191,7 @@
define @vsra_vx_nxv4i32( %va, i32 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 %b, i32 0
@@ -1217,7 +1217,7 @@
define @vsra_vi_nxv4i32( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 5, i32 0
@@ -1245,7 +1245,7 @@
define @vsra_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.ashr.nxv8i32( %va, %b, %m, i32 %evl)
@@ -1267,7 +1267,7 @@
define @vsra_vx_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 %b, i32 0
@@ -1293,7 +1293,7 @@
define @vsra_vi_nxv8i32( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 5, i32 0
@@ -1321,7 +1321,7 @@
define @vsra_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.ashr.nxv16i32( %va, %b, %m, i32 %evl)
@@ -1343,7 +1343,7 @@
define @vsra_vx_nxv16i32( %va, i32 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 %b, i32 0
@@ -1369,7 +1369,7 @@
define @vsra_vi_nxv16i32( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i32 5, i32 0
@@ -1397,7 +1397,7 @@
define @vsra_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.ashr.nxv1i64( %va, %b, %m, i32 %evl)
@@ -1419,13 +1419,13 @@
define @vsra_vx_nxv1i64( %va, i64 %b, %m, i32 zeroext %evl) {
; RV32-LABEL: vsra_vx_nxv1i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vsra.vx v8, v8, a0, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vsra_vx_nxv1i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vsra.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement poison, i64 %b, i32 0
@@ -1457,7 +1457,7 @@
define @vsra_vi_nxv1i64( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i64 5, i32 0
@@ -1485,7 +1485,7 @@
define @vsra_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.ashr.nxv2i64( %va, %b, %m, i32 %evl)
@@ -1507,13 +1507,13 @@
define @vsra_vx_nxv2i64( %va, i64 %b, %m, i32 zeroext %evl) {
; RV32-LABEL: vsra_vx_nxv2i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vsra.vx v8, v8, a0, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vsra_vx_nxv2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vsra.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement poison, i64 %b, i32 0
@@ -1545,7 +1545,7 @@
define @vsra_vi_nxv2i64( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement poison, i64 5, i32 0
@@ -1573,7 +1573,7 @@
define @vsra_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.ashr.nxv4i64( %va, %b, %m, i32 %evl)
@@ -1595,13 +1595,13 @@
define @vsra_vx_nxv4i64( %va, i64 %b, %m, i32 zeroext %evl) {
; RV32-LABEL: vsra_vx_nxv4i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vsra.vx v8, v8, a0, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vsra_vx_nxv4i64:
; RV64: #
%bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vsra.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1633,7 +1633,7 @@ define @vsra_vi_nxv4i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 5, i32 0 @@ -1661,7 +1661,7 @@ define @vsra_vv_nxv6i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv6i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.ashr.nxv6i64( %va, %b, %m, i32 %evl) @@ -1673,7 +1673,7 @@ define @vsra_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.ashr.nxv8i64( %va, %b, %m, i32 %evl) @@ -1695,13 +1695,13 @@ define @vsra_vx_nxv8i64( %va, i64 %b, %m, i32 zeroext %evl) { ; RV32-LABEL: vsra_vx_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vsra.vx v8, v8, a0, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vsra_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsra.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1733,7 +1733,7 @@ define @vsra_vi_nxv8i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 5, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll @@ -14,7 +14,7 @@ ; CHECK-NEXT: vand.vx v8, v8, a2 ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vand.vx v9, v9, a2 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i7 %b, i32 0 @@ -28,7 +28,7 @@ define @vsrl_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv1i8( %va, %b, %m, i32 %evl) @@ -50,7 +50,7 @@ define @vsrl_vx_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -76,7 +76,7 @@ define @vsrl_vi_nxv1i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 4, i32 
0 @@ -104,7 +104,7 @@ define @vsrl_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv2i8( %va, %b, %m, i32 %evl) @@ -126,7 +126,7 @@ define @vsrl_vx_nxv2i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -152,7 +152,7 @@ define @vsrl_vi_nxv2i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 4, i32 0 @@ -180,7 +180,7 @@ define @vsrl_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv4i8( %va, %b, %m, i32 %evl) @@ -202,7 +202,7 @@ define @vsrl_vx_nxv4i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -228,7 +228,7 @@ define @vsrl_vi_nxv4i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 4, i32 0 @@ -256,7 +256,7 @@ define @vsrl_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv8i8( %va, %b, %m, i32 %evl) @@ -278,7 +278,7 @@ define @vsrl_vx_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -304,7 +304,7 @@ define @vsrl_vi_nxv8i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 4, i32 0 @@ -332,7 +332,7 @@ define @vsrl_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv16i8( %va, %b, %m, i32 %evl) @@ -354,7 +354,7 @@ define @vsrl_vx_nxv16i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsrl.vx v8, 
v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -380,7 +380,7 @@ define @vsrl_vi_nxv16i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 4, i32 0 @@ -408,7 +408,7 @@ define @vsrl_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv32i8( %va, %b, %m, i32 %evl) @@ -430,7 +430,7 @@ define @vsrl_vx_nxv32i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -456,7 +456,7 @@ define @vsrl_vi_nxv32i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 4, i32 0 @@ -484,7 +484,7 @@ define @vsrl_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv64i8( %va, %b, %m, i32 %evl) @@ -506,7 +506,7 @@ define @vsrl_vx_nxv64i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -532,7 +532,7 @@ define @vsrl_vi_nxv64i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 4, i32 0 @@ -560,7 +560,7 @@ define @vsrl_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv1i16( %va, %b, %m, i32 %evl) @@ -582,7 +582,7 @@ define @vsrl_vx_nxv1i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -608,7 +608,7 @@ define @vsrl_vi_nxv1i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 4, i32 0 @@ -636,7 +636,7 @@ define @vsrl_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, 
mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv2i16( %va, %b, %m, i32 %evl) @@ -658,7 +658,7 @@ define @vsrl_vx_nxv2i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -684,7 +684,7 @@ define @vsrl_vi_nxv2i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 4, i32 0 @@ -712,7 +712,7 @@ define @vsrl_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv4i16( %va, %b, %m, i32 %evl) @@ -734,7 +734,7 @@ define @vsrl_vx_nxv4i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -760,7 +760,7 @@ define @vsrl_vi_nxv4i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 4, i32 0 @@ -788,7 +788,7 @@ define @vsrl_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv8i16( %va, %b, %m, i32 %evl) @@ -810,7 +810,7 @@ define @vsrl_vx_nxv8i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -836,7 +836,7 @@ define @vsrl_vi_nxv8i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 4, i32 0 @@ -864,7 +864,7 @@ define @vsrl_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv16i16( %va, %b, %m, i32 %evl) @@ -886,7 +886,7 @@ define @vsrl_vx_nxv16i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -912,7 +912,7 @@ define 
@vsrl_vi_nxv16i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 4, i32 0 @@ -940,7 +940,7 @@ define @vsrl_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv32i16( %va, %b, %m, i32 %evl) @@ -962,7 +962,7 @@ define @vsrl_vx_nxv32i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -988,7 +988,7 @@ define @vsrl_vi_nxv32i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 4, i32 0 @@ -1016,7 +1016,7 @@ define @vsrl_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv1i32( %va, %b, %m, i32 %evl) @@ -1038,7 +1038,7 @@ define @vsrl_vx_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1064,7 +1064,7 @@ define @vsrl_vi_nxv1i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 4, i32 0 @@ -1092,7 +1092,7 @@ define @vsrl_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv2i32( %va, %b, %m, i32 %evl) @@ -1114,7 +1114,7 @@ define @vsrl_vx_nxv2i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1140,7 +1140,7 @@ define @vsrl_vi_nxv2i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 4, i32 0 @@ -1168,7 +1168,7 @@ define @vsrl_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; 
CHECK-NEXT: vsrl.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv4i32( %va, %b, %m, i32 %evl) @@ -1190,7 +1190,7 @@ define @vsrl_vx_nxv4i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1216,7 +1216,7 @@ define @vsrl_vi_nxv4i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 4, i32 0 @@ -1244,7 +1244,7 @@ define @vsrl_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv8i32( %va, %b, %m, i32 %evl) @@ -1266,7 +1266,7 @@ define @vsrl_vx_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1292,7 +1292,7 @@ define @vsrl_vi_nxv8i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 4, i32 0 @@ -1320,7 +1320,7 @@ define @vsrl_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv16i32( %va, %b, %m, i32 %evl) @@ -1342,7 +1342,7 @@ define @vsrl_vx_nxv16i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1368,7 +1368,7 @@ define @vsrl_vi_nxv16i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 4, i32 0 @@ -1396,7 +1396,7 @@ define @vsrl_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv1i64( %va, %b, %m, i32 %evl) @@ -1418,13 +1418,13 @@ define @vsrl_vx_nxv1i64( %va, i64 %b, %m, i32 zeroext %evl) { ; RV32-LABEL: vsrl_vx_nxv1i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vsrl_vx_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, 
e64, m1, ta, ma ; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1456,7 +1456,7 @@ define @vsrl_vi_nxv1i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 4, i32 0 @@ -1484,7 +1484,7 @@ define @vsrl_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv2i64( %va, %b, %m, i32 %evl) @@ -1506,13 +1506,13 @@ define @vsrl_vx_nxv2i64( %va, i64 %b, %m, i32 zeroext %evl) { ; RV32-LABEL: vsrl_vx_nxv2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vsrl_vx_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1544,7 +1544,7 @@ define @vsrl_vi_nxv2i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 4, i32 0 @@ -1572,7 +1572,7 @@ define @vsrl_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv4i64( %va, %b, %m, i32 %evl) @@ -1594,13 +1594,13 @@ define @vsrl_vx_nxv4i64( %va, i64 %b, %m, i32 zeroext %evl) { ; RV32-LABEL: vsrl_vx_nxv4i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vsrl_vx_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1632,7 +1632,7 @@ define @vsrl_vi_nxv4i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 4, i32 0 @@ -1660,7 +1660,7 @@ define @vsrl_vv_nxv5i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv5i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv5i64( %va, %b, %m, i32 %evl) @@ -1672,7 +1672,7 @@ define @vsrl_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv8i64( %va, %b, %m, i32 %evl) @@ -1694,13 
+1694,13 @@ define @vsrl_vx_nxv8i64( %va, i64 %b, %m, i32 zeroext %evl) { ; RV32-LABEL: vsrl_vx_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vsrl_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1732,7 +1732,7 @@ define @vsrl_vi_nxv8i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 4, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll @@ -9,7 +9,7 @@ define @vsub_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv8i7: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i7 %b, i32 0 @@ -23,7 +23,7 @@ define @vsub_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv1i8( %va, %b, %m, i32 %evl) @@ -45,7 +45,7 @@ define @vsub_vx_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -73,7 +73,7 @@ define @vsub_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv2i8( %va, %b, %m, i32 %evl) @@ -95,7 +95,7 @@ define @vsub_vx_nxv2i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -123,7 +123,7 @@ define @vsub_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv4i8( %va, %b, %m, i32 %evl) @@ -145,7 +145,7 @@ define @vsub_vx_nxv4i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -173,7 +173,7 @@ define @vsub_vv_nxv5i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv5i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli 
zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv5i8( %va, %b, %m, i32 %evl) @@ -195,7 +195,7 @@ define @vsub_vx_nxv5i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv5i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -223,7 +223,7 @@ define @vsub_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv8i8( %va, %b, %m, i32 %evl) @@ -245,7 +245,7 @@ define @vsub_vx_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -273,7 +273,7 @@ define @vsub_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv16i8( %va, %b, %m, i32 %evl) @@ -295,7 +295,7 @@ define @vsub_vx_nxv16i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -323,7 +323,7 @@ define @vsub_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv32i8( %va, %b, %m, i32 %evl) @@ -345,7 +345,7 @@ define @vsub_vx_nxv32i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -373,7 +373,7 @@ define @vsub_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv64i8( %va, %b, %m, i32 %evl) @@ -395,7 +395,7 @@ define @vsub_vx_nxv64i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -423,7 +423,7 @@ define @vsub_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv1i16( %va, %b, %m, i32 %evl) @@ -445,7 +445,7 @@ define @vsub_vx_nxv1i16( %va, i16 %b, %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vsub_vx_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -473,7 +473,7 @@ define @vsub_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv2i16( %va, %b, %m, i32 %evl) @@ -495,7 +495,7 @@ define @vsub_vx_nxv2i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -523,7 +523,7 @@ define @vsub_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv4i16( %va, %b, %m, i32 %evl) @@ -545,7 +545,7 @@ define @vsub_vx_nxv4i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -573,7 +573,7 @@ define @vsub_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv8i16( %va, %b, %m, i32 %evl) @@ -595,7 +595,7 @@ define @vsub_vx_nxv8i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -623,7 +623,7 @@ define @vsub_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv16i16( %va, %b, %m, i32 %evl) @@ -645,7 +645,7 @@ define @vsub_vx_nxv16i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -673,7 +673,7 @@ define @vsub_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv32i16( %va, %b, %m, i32 %evl) @@ -695,7 +695,7 @@ define @vsub_vx_nxv32i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsub.vx 
v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -723,7 +723,7 @@ define @vsub_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv1i32( %va, %b, %m, i32 %evl) @@ -745,7 +745,7 @@ define @vsub_vx_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -773,7 +773,7 @@ define @vsub_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv2i32( %va, %b, %m, i32 %evl) @@ -795,7 +795,7 @@ define @vsub_vx_nxv2i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -823,7 +823,7 @@ define @vsub_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv4i32( %va, %b, %m, i32 %evl) @@ -845,7 +845,7 @@ define @vsub_vx_nxv4i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -873,7 +873,7 @@ define @vsub_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv8i32( %va, %b, %m, i32 %evl) @@ -895,7 +895,7 @@ define @vsub_vx_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -923,7 +923,7 @@ define @vsub_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv16i32( %va, %b, %m, i32 %evl) @@ -945,7 +945,7 @@ define @vsub_vx_nxv16i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -973,7 +973,7 @@ define @vsub_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vsub_vv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv1i64( %va, %b, %m, i32 %evl) @@ -1002,14 +1002,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vsub.vv v8, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vsub.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1051,7 +1051,7 @@ define @vsub_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv2i64( %va, %b, %m, i32 %evl) @@ -1080,14 +1080,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vsub.vv v8, v8, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vsub.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1129,7 +1129,7 @@ define @vsub_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv4i64( %va, %b, %m, i32 %evl) @@ -1158,14 +1158,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vsub.vv v8, v8, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vsub.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1207,7 +1207,7 @@ define @vsub_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv8i64( %va, %b, %m, i32 %evl) @@ -1236,14 +1236,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vsub.vv v8, v8, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsub.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp-mask.ll --- a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp-mask.ll @@ -7,9 +7,8 @@ define @vtrunc_nxv2i1_nxv2i16( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i1_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 0, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.trunc.nxv2i1.nxv2i16( %a, %m, i32 %vl) @@ -32,9 +31,8 @@ define @vtrunc_nxv2i1_nxv2i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i1_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 0, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.trunc.nxv2i1.nxv2i32( %a, %m, i32 %vl) @@ -57,9 +55,8 @@ define @vtrunc_nxv2i1_nxv2i64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i1_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vand.vi v10, v8, 1, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v10, 0, v0.t ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll @@ -7,7 +7,7 @@ define @vtrunc_nxv2i7_nxv2i16( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i7_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.trunc.nxv2i7.nxv2i16( %a, %m, i32 %vl) @@ -19,7 +19,7 @@ define @vtrunc_nxv2i8_nxv2i15( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i8_nxv2i15: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.trunc.nxv2i8.nxv2i15( %a, %m, i32 %vl) @@ -31,7 +31,7 @@ define @vtrunc_nxv2i8_nxv2i16( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i8_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.trunc.nxv2i8.nxv2i16( %a, %m, i32 %vl) @@ -53,9 +53,9 @@ define @vtrunc_nxv2i8_nxv2i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i8_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.trunc.nxv2i8.nxv2i32( %a, %m, i32 %vl) @@ -79,11 +79,11 @@ define @vtrunc_nxv2i8_nxv2i64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i8_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v10, v8, 0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, 
mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.trunc.nxv2i8.nxv2i64( %a, %m, i32 %vl) @@ -109,7 +109,7 @@ define @vtrunc_nxv2i16_nxv2i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i16_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.trunc.nxv2i16.nxv2i32( %a, %m, i32 %vl) @@ -131,9 +131,9 @@ define @vtrunc_nxv2i16_nxv2i64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i16_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v10, v8, 0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.trunc.nxv2i16.nxv2i64( %a, %m, i32 %vl) @@ -168,18 +168,18 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a2, a3 ; CHECK-NEXT: .LBB12_2: -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma ; CHECK-NEXT: vnsrl.wi v28, v16, 0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vnsrl.wi v18, v28, 0, v0.t ; CHECK-NEXT: bltu a0, a1, .LBB12_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: mv a0, a1 ; CHECK-NEXT: .LBB12_4: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vnsrl.wi v20, v8, 0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vnsrl.wi v16, v20, 0, v0.t ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -192,7 +192,7 @@ define @vtrunc_nxv2i32_nxv2i64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i32_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v10, v8, 0, v0.t ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -228,18 +228,18 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a2, a3 ; CHECK-NEXT: .LBB15_2: -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vnsrl.wi v28, v16, 0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma ; CHECK-NEXT: vnsrl.wi v18, v28, 0, v0.t ; CHECK-NEXT: bltu a0, a1, .LBB15_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: mv a0, a1 ; CHECK-NEXT: .LBB15_4: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vnsrl.wi v20, v8, 0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma ; CHECK-NEXT: vnsrl.wi v16, v20, 0, v0.t ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -264,18 +264,18 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a2, a3 ; CHECK-NEXT: .LBB16_2: -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vnsrl.wi v28, v16, 0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma ; CHECK-NEXT: vnsrl.wi v18, v28, 0, v0.t ; CHECK-NEXT: bltu a0, a1, .LBB16_4 ; CHECK-NEXT: # %bb.3: ; 
CHECK-NEXT: mv a0, a1 ; CHECK-NEXT: .LBB16_4: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vnsrl.wi v20, v8, 0, v0.t -; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma ; CHECK-NEXT: vnsrl.wi v16, v20, 0, v0.t ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -317,7 +317,7 @@ ; CHECK-NEXT: .LBB17_4: ; CHECK-NEXT: srli a7, a1, 2 ; CHECK-NEXT: slli t0, a1, 3 -; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, ma ; CHECK-NEXT: vnsrl.wi v12, v16, 0, v0.t ; CHECK-NEXT: bltu a5, a1, .LBB17_6 ; CHECK-NEXT: # %bb.5: @@ -327,7 +327,7 @@ ; CHECK-NEXT: vsetvli t1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v1, v24, a7 ; CHECK-NEXT: add a7, a0, t0 -; CHECK-NEXT: vsetvli zero, a5, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a5, e32, m4, ta, ma ; CHECK-NEXT: sub a4, a2, a4 ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: csrr a5, vlenb @@ -357,7 +357,7 @@ ; CHECK-NEXT: vl8re64.v v16, (a0) ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 @@ -368,7 +368,7 @@ ; CHECK-NEXT: # %bb.11: ; CHECK-NEXT: mv a6, a1 ; CHECK-NEXT: .LBB17_12: -; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload diff --git a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp-mask.ll --- a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp-mask.ll @@ -7,7 +7,7 @@ define @vuitofp_nxv2f16_nxv2i1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f16_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 ; CHECK-NEXT: vmv1r.v v0, v8 @@ -34,7 +34,7 @@ define @vuitofp_nxv2f32_nxv2i1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f32_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 ; CHECK-NEXT: vmv1r.v v0, v8 @@ -61,7 +61,7 @@ define @vuitofp_nxv2f64_nxv2i1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f64_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: vmerge.vim v10, v10, 1, v0 ; CHECK-NEXT: vmv1r.v v0, v8 diff --git a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll @@ -10,7 +10,7 @@ ; CHECK-NEXT: li a1, 127 ; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, ma ; CHECK-NEXT: vand.vx v9, v8, a1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.uitofp.nxv2f16.nxv2i7( %va, %m, i32 %evl) @@ -22,7 +22,7 @@ define @vuitofp_nxv2f16_nxv2i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f16_nxv2i8: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -46,7 +46,7 @@ define @vuitofp_nxv2f16_nxv2i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f16_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.uitofp.nxv2f16.nxv2i16( %va, %m, i32 %evl) @@ -68,7 +68,7 @@ define @vuitofp_nxv2f16_nxv2i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f16_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.xu.w v9, v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -92,9 +92,9 @@ define @vuitofp_nxv2f16_nxv2i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f16_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.f.xu.w v10, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v8, v10, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.uitofp.nxv2f16.nxv2i64( %va, %m, i32 %evl) @@ -118,7 +118,7 @@ define @vuitofp_nxv2f32_nxv2i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f32_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vzext.vf2 v9, v8, v0.t ; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t ; CHECK-NEXT: ret @@ -142,7 +142,7 @@ define @vuitofp_nxv2f32_nxv2i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f32_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -166,7 +166,7 @@ define @vuitofp_nxv2f32_nxv2i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f32_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.uitofp.nxv2f32.nxv2i32( %va, %m, i32 %evl) @@ -188,7 +188,7 @@ define @vuitofp_nxv2f32_nxv2i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f32_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.f.xu.w v10, v8, v0.t ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -212,7 +212,7 @@ define @vuitofp_nxv2f64_nxv2i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f64_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vzext.vf4 v10, v8, v0.t ; CHECK-NEXT: vfwcvt.f.xu.v v8, v10, v0.t ; CHECK-NEXT: ret @@ -236,7 +236,7 @@ define @vuitofp_nxv2f64_nxv2i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f64_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vzext.vf2 v10, v8, v0.t ; CHECK-NEXT: vfwcvt.f.xu.v v8, v10, v0.t ; CHECK-NEXT: ret @@ -260,7 +260,7 @@ define @vuitofp_nxv2f64_nxv2i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f64_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli 
zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v10, v8, v0.t ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -284,7 +284,7 @@ define @vuitofp_nxv2f64_nxv2i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f64_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.uitofp.nxv2f64.nxv2i64( %va, %m, i32 %evl) @@ -325,13 +325,13 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a2, a3 ; CHECK-NEXT: .LBB25_2: -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vfncvt.f.xu.w v12, v16, v0.t ; CHECK-NEXT: bltu a0, a1, .LBB25_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: mv a0, a1 ; CHECK-NEXT: .LBB25_4: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload @@ -362,13 +362,13 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a2, a3 ; CHECK-NEXT: .LBB26_2: -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v16, v16, v0.t ; CHECK-NEXT: bltu a0, a1, .LBB26_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: mv a0, a1 ; CHECK-NEXT: .LBB26_4: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8, v0.t ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll @@ -9,7 +9,7 @@ define @vxor_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv8i7: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i7 %b, i32 0 @@ -23,7 +23,7 @@ define @vxor_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv1i8( %va, %b, %m, i32 %evl) @@ -45,7 +45,7 @@ define @vxor_vx_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -71,7 +71,7 @@ define @vxor_vi_nxv1i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 7, i32 0 @@ -97,7 +97,7 @@ define @vxor_vi_nxv1i8_1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv1i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vnot.v v8, v8, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 -1, i32 0 @@ -125,7 +125,7 @@ define @vxor_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv2i8: ; CHECK: # 
%bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv2i8( %va, %b, %m, i32 %evl) @@ -147,7 +147,7 @@ define @vxor_vx_nxv2i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -173,7 +173,7 @@ define @vxor_vi_nxv2i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 7, i32 0 @@ -199,7 +199,7 @@ define @vxor_vi_nxv2i8_1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv2i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vnot.v v8, v8, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 -1, i32 0 @@ -227,7 +227,7 @@ define @vxor_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv4i8( %va, %b, %m, i32 %evl) @@ -249,7 +249,7 @@ define @vxor_vx_nxv4i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -275,7 +275,7 @@ define @vxor_vi_nxv4i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 7, i32 0 @@ -301,7 +301,7 @@ define @vxor_vi_nxv4i8_1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv4i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vnot.v v8, v8, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 -1, i32 0 @@ -329,7 +329,7 @@ define @vxor_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv8i8( %va, %b, %m, i32 %evl) @@ -351,7 +351,7 @@ define @vxor_vx_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -377,7 +377,7 @@ define @vxor_vi_nxv8i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 7, i32 0 @@ -403,7 +403,7 @@ define @vxor_vi_nxv8i8_1( %va, %m, i32 zeroext 
%evl) { ; CHECK-LABEL: vxor_vi_nxv8i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vnot.v v8, v8, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 -1, i32 0 @@ -431,7 +431,7 @@ define @vxor_vv_nxv15i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv15i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv15i8( %va, %b, %m, i32 %evl) @@ -453,7 +453,7 @@ define @vxor_vx_nxv15i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv15i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -479,7 +479,7 @@ define @vxor_vi_nxv15i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv15i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 7, i32 0 @@ -505,7 +505,7 @@ define @vxor_vi_nxv15i8_1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv15i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vnot.v v8, v8, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 -1, i32 0 @@ -533,7 +533,7 @@ define @vxor_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv16i8( %va, %b, %m, i32 %evl) @@ -555,7 +555,7 @@ define @vxor_vx_nxv16i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -581,7 +581,7 @@ define @vxor_vi_nxv16i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 7, i32 0 @@ -607,7 +607,7 @@ define @vxor_vi_nxv16i8_1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv16i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vnot.v v8, v8, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 -1, i32 0 @@ -635,7 +635,7 @@ define @vxor_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv32i8( %va, %b, %m, i32 %evl) @@ -657,7 +657,7 @@ define @vxor_vx_nxv32i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ 
-683,7 +683,7 @@ define @vxor_vi_nxv32i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 7, i32 0 @@ -709,7 +709,7 @@ define @vxor_vi_nxv32i8_1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv32i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vnot.v v8, v8, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 -1, i32 0 @@ -737,7 +737,7 @@ define @vxor_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv64i8( %va, %b, %m, i32 %evl) @@ -759,7 +759,7 @@ define @vxor_vx_nxv64i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -785,7 +785,7 @@ define @vxor_vi_nxv64i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 7, i32 0 @@ -811,7 +811,7 @@ define @vxor_vi_nxv64i8_1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv64i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vnot.v v8, v8, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 -1, i32 0 @@ -839,7 +839,7 @@ define @vxor_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv1i16( %va, %b, %m, i32 %evl) @@ -861,7 +861,7 @@ define @vxor_vx_nxv1i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -873,7 +873,7 @@ define @vxor_vx_nxv1i16_commute( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv1i16_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -899,7 +899,7 @@ define @vxor_vi_nxv1i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 7, i32 0 @@ -925,7 +925,7 @@ define @vxor_vi_nxv1i16_1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv1i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: 
vnot.v v8, v8, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 -1, i32 0 @@ -953,7 +953,7 @@ define @vxor_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv2i16( %va, %b, %m, i32 %evl) @@ -975,7 +975,7 @@ define @vxor_vx_nxv2i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -1001,7 +1001,7 @@ define @vxor_vi_nxv2i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 7, i32 0 @@ -1027,7 +1027,7 @@ define @vxor_vi_nxv2i16_1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv2i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vnot.v v8, v8, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 -1, i32 0 @@ -1055,7 +1055,7 @@ define @vxor_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv4i16( %va, %b, %m, i32 %evl) @@ -1077,7 +1077,7 @@ define @vxor_vx_nxv4i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -1103,7 +1103,7 @@ define @vxor_vi_nxv4i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 7, i32 0 @@ -1129,7 +1129,7 @@ define @vxor_vi_nxv4i16_1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv4i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vnot.v v8, v8, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 -1, i32 0 @@ -1157,7 +1157,7 @@ define @vxor_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv8i16( %va, %b, %m, i32 %evl) @@ -1179,7 +1179,7 @@ define @vxor_vx_nxv8i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -1205,7 +1205,7 @@ define @vxor_vi_nxv8i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv8i16: ; CHECK: # 
%bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 7, i32 0 @@ -1231,7 +1231,7 @@ define @vxor_vi_nxv8i16_1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv8i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vnot.v v8, v8, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 -1, i32 0 @@ -1259,7 +1259,7 @@ define @vxor_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv16i16( %va, %b, %m, i32 %evl) @@ -1281,7 +1281,7 @@ define @vxor_vx_nxv16i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -1307,7 +1307,7 @@ define @vxor_vi_nxv16i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 7, i32 0 @@ -1333,7 +1333,7 @@ define @vxor_vi_nxv16i16_1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv16i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vnot.v v8, v8, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 -1, i32 0 @@ -1361,7 +1361,7 @@ define @vxor_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv32i16( %va, %b, %m, i32 %evl) @@ -1383,7 +1383,7 @@ define @vxor_vx_nxv32i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -1409,7 +1409,7 @@ define @vxor_vi_nxv32i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 7, i32 0 @@ -1435,7 +1435,7 @@ define @vxor_vi_nxv32i16_1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv32i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vnot.v v8, v8, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 -1, i32 0 @@ -1463,7 +1463,7 @@ define @vxor_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv1i32( %va, %b, %m, 
i32 %evl) @@ -1485,7 +1485,7 @@ define @vxor_vx_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1511,7 +1511,7 @@ define @vxor_vi_nxv1i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 7, i32 0 @@ -1537,7 +1537,7 @@ define @vxor_vi_nxv1i32_1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv1i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vnot.v v8, v8, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 -1, i32 0 @@ -1565,7 +1565,7 @@ define @vxor_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv2i32( %va, %b, %m, i32 %evl) @@ -1587,7 +1587,7 @@ define @vxor_vx_nxv2i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1613,7 +1613,7 @@ define @vxor_vi_nxv2i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 7, i32 0 @@ -1639,7 +1639,7 @@ define @vxor_vi_nxv2i32_1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv2i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vnot.v v8, v8, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 -1, i32 0 @@ -1667,7 +1667,7 @@ define @vxor_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv4i32( %va, %b, %m, i32 %evl) @@ -1689,7 +1689,7 @@ define @vxor_vx_nxv4i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1715,7 +1715,7 @@ define @vxor_vi_nxv4i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 7, i32 0 @@ -1741,7 +1741,7 @@ define @vxor_vi_nxv4i32_1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv4i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, 
a0, e32, m2, ta, ma ; CHECK-NEXT: vnot.v v8, v8, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 -1, i32 0 @@ -1769,7 +1769,7 @@ define @vxor_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv8i32( %va, %b, %m, i32 %evl) @@ -1791,7 +1791,7 @@ define @vxor_vx_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1817,7 +1817,7 @@ define @vxor_vi_nxv8i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 7, i32 0 @@ -1843,7 +1843,7 @@ define @vxor_vi_nxv8i32_1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv8i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vnot.v v8, v8, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 -1, i32 0 @@ -1871,7 +1871,7 @@ define @vxor_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv16i32( %va, %b, %m, i32 %evl) @@ -1893,7 +1893,7 @@ define @vxor_vx_nxv16i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1919,7 +1919,7 @@ define @vxor_vi_nxv16i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 7, i32 0 @@ -1945,7 +1945,7 @@ define @vxor_vi_nxv16i32_1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv16i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vnot.v v8, v8, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 -1, i32 0 @@ -1973,7 +1973,7 @@ define @vxor_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv1i64( %va, %b, %m, i32 %evl) @@ -2002,14 +2002,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vxor.vv v8, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; 
RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vxor.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -2049,7 +2049,7 @@ define @vxor_vi_nxv1i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 7, i32 0 @@ -2075,7 +2075,7 @@ define @vxor_vi_nxv1i64_1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv1i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vnot.v v8, v8, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 -1, i32 0 @@ -2103,7 +2103,7 @@ define @vxor_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv2i64( %va, %b, %m, i32 %evl) @@ -2132,14 +2132,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vxor.vv v8, v8, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vxor.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -2179,7 +2179,7 @@ define @vxor_vi_nxv2i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 7, i32 0 @@ -2205,7 +2205,7 @@ define @vxor_vi_nxv2i64_1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv2i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vnot.v v8, v8, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 -1, i32 0 @@ -2233,7 +2233,7 @@ define @vxor_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv4i64( %va, %b, %m, i32 %evl) @@ -2262,14 +2262,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vxor.vv v8, v8, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vxor.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -2309,7 +2309,7 @@ define @vxor_vi_nxv4i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t ; CHECK-NEXT: ret 
%elt.head = insertelement poison, i64 7, i32 0 @@ -2335,7 +2335,7 @@ define @vxor_vi_nxv4i64_1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv4i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vnot.v v8, v8, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 -1, i32 0 @@ -2363,7 +2363,7 @@ define @vxor_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v16, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv8i64( %va, %b, %m, i32 %evl) @@ -2392,14 +2392,14 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vxor.vv v8, v8, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vxor.vx v8, v8, a0, v0.t ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -2439,7 +2439,7 @@ define @vxor_vi_nxv8i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 7, i32 0 @@ -2465,7 +2465,7 @@ define @vxor_vi_nxv8i64_1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv8i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vnot.v v8, v8, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 -1, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll @@ -7,7 +7,7 @@ define @vzext_nxv2i8_nxv2i16( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i8_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vzext.vf2 v9, v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -31,7 +31,7 @@ define @vzext_nxv2i8_nxv2i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i8_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vzext.vf4 v9, v8, v0.t ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -55,7 +55,7 @@ define @vzext_nxv2i8_nxv2i64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i8_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vzext.vf8 v10, v8, v0.t ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -79,7 +79,7 @@ define @vzext_nxv2i16_nxv2i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i16_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vzext.vf2 v9, v8, v0.t ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -103,7 +103,7 @@ define @vzext_nxv2i16_nxv2i64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i16_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, 
ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vzext.vf4 v10, v8, v0.t ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -127,7 +127,7 @@ define @vzext_nxv2i32_nxv2i64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i32_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vzext.vf2 v10, v8, v0.t ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -163,13 +163,13 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a2, a3 ; CHECK-NEXT: .LBB12_2: -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: vzext.vf4 v16, v10, v0.t ; CHECK-NEXT: bltu a0, a1, .LBB12_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: mv a0, a1 ; CHECK-NEXT: .LBB12_4: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vzext.vf4 v24, v8, v0.t ; CHECK-NEXT: vmv.v.v v8, v24
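Note on the recurring `tu, mu` -> `ta, ma` churn in the hunks above: the last two operands of each `vsetvli` line are the vtype tail and mask policies (tail undisturbed/agnostic, mask undisturbed/agnostic), which correspond to the two-bit policy immediate the pass asserts on (`RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC`). A minimal standalone sketch of that mapping follows; the names (`policySuffix`, `kTailAgnostic`, `kMaskAgnostic`) are illustrative and not LLVM's API, though the bit values match LLVM's policy encoding (tail agnostic = bit 0, mask agnostic = bit 1).

#include <cstdint>
#include <string>

// Two-bit policy immediate: bit 0 selects the tail policy, bit 1 the mask
// policy; a set bit means "agnostic" (destination elements may be clobbered).
constexpr uint64_t kTailAgnostic = 1; // "ta" when set, "tu" when clear
constexpr uint64_t kMaskAgnostic = 2; // "ma" when set, "mu" when clear

// Render the policy portion of a vsetvli operand list, e.g. "ta, ma".
std::string policySuffix(uint64_t Policy) {
  std::string Suffix = (Policy & kTailAgnostic) ? "ta" : "tu";
  Suffix += (Policy & kMaskAgnostic) ? ", ma" : ", mu";
  return Suffix;
}

// policySuffix(3) == "ta, ma"  (both agnostic -- the new default above when
//                               there is no tied passthru operand)
// policySuffix(0) == "tu, mu"  (both undisturbed -- the old conservative
//                               default these tests previously checked for)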