diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -2114,7 +2114,7 @@
 
 static MachineBasicBlock *addVSetVL(MachineInstr &MI, MachineBasicBlock *BB,
                                     int VLIndex, unsigned SEWIndex,
-                                    unsigned VLMul) {
+                                    unsigned VLMul, bool WritesElement0) {
   MachineFunction &MF = *BB->getParent();
   DebugLoc DL = MI.getDebugLoc();
   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
@@ -2141,9 +2141,19 @@
     MIB.addReg(RISCV::X0, RegState::Define | RegState::Dead)
         .addReg(RISCV::X0, RegState::Kill);
 
+  // Default to tail agnostic unless the destination is tied to a source. In
+  // that case the user would have some control over the tail values. The tail
+  // policy is also ignored on instructions that only update element 0 like
+  // vmv.s.x or reductions so use agnostic there to match the common case.
+  // FIXME: This is conservatively correct, but we might want to detect that
+  // the input is undefined.
+  bool TailAgnostic = true;
+  if (MI.isRegTiedToUseOperand(0) && !WritesElement0)
+    TailAgnostic = false;
+
   // For simplicity we reuse the vtype representation here.
   MIB.addImm(RISCVVType::encodeVTYPE(Multiplier, ElementWidth,
-                                     /*TailAgnostic*/ true,
+                                     /*TailAgnostic*/ TailAgnostic,
                                      /*MaskAgnostic*/ false));
 
   // Remove (now) redundant operands from pseudo
@@ -2164,9 +2174,10 @@
           RISCVVPseudosTable::getPseudoInfo(MI.getOpcode())) {
     int VLIndex = RVV->getVLIndex();
     int SEWIndex = RVV->getSEWIndex();
+    bool WritesElement0 = RVV->writesElement0();
 
     assert(SEWIndex >= 0 && "SEWIndex must be >= 0");
-    return addVSetVL(MI, BB, VLIndex, SEWIndex, RVV->VLMul);
+    return addVSetVL(MI, BB, VLIndex, SEWIndex, RVV->VLMul, WritesElement0);
   }
 
   switch (MI.getOpcode()) {
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -263,6 +263,7 @@
   bits<8> MergeOpIndex = InvalidIndex.V;
   bits<3> VLMul;
   bit HasDummyMask = 0;
+  bit WritesElement0 = 0;
 }
 
 // The actual table.
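
(Aside, not part of the patch.) The tail-policy choice added in addVSetVL above boils down to a single predicate: keep the default tail-agnostic ("ta") encoding unless the pseudo's destination is tied to a source operand and the instruction writes more than element 0. Below is a minimal standalone C++ sketch of that decision, assuming a plain HasTiedDest flag in place of MI.isRegTiedToUseOperand(0):

  #include <cstdio>

  // Illustrative sketch only; mirrors the TailAgnostic logic in the patch.
  // Returns true when the emitted vsetvli may use "ta" (tail agnostic),
  // false when it should use "tu" (tail undisturbed).
  static bool useTailAgnostic(bool HasTiedDest, bool WritesElement0) {
    // A tied destination means a source operand supplies the tail elements,
    // so the user may care about them: fall back to tail undisturbed ...
    // ... unless the pseudo only updates element 0 (vmv.s.x, vfmv.s.f,
    // reductions), where the patch keeps "ta" to match the common case.
    return !HasTiedDest || WritesElement0;
  }

  int main() {
    std::printf("%d\n", useTailAgnostic(false, false)); // unmasked op -> ta
    std::printf("%d\n", useTailAgnostic(true, false));  // masked, tied to merge operand -> tu
    std::printf("%d\n", useTailAgnostic(true, true));   // reduction / vmv.s.x -> ta
    return 0;
  }

This is why the masked-intrinsic tests below flip from "ta,mu" to "tu,mu", while the pseudos tagged WritesElement0 = 1 (reductions, vmv.s.x, vfmv.s.f) keep the agnostic encoding.
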
@@ -270,7 +271,7 @@ let FilterClass = "RISCVVPseudo"; let CppTypeName = "PseudoInfo"; let Fields = [ "Pseudo", "BaseInstr", "VLIndex", "SEWIndex", "MergeOpIndex", - "VLMul", "HasDummyMask" ]; + "VLMul", "HasDummyMask", "WritesElement0" ]; let PrimaryKey = [ "Pseudo" ]; let PrimaryKeyName = "getPseudoInfo"; } @@ -1159,8 +1160,10 @@ } multiclass VPseudoReductionV_VS { - foreach m = MxList.m in + foreach m = MxList.m in { + let WritesElement0 = 1 in defm _VS : VPseudoTernary; + } } //===----------------------------------------------------------------------===// @@ -2477,7 +2480,7 @@ def PseudoVMV_X_S # "_" # m.MX: Pseudo<(outs GPR:$rd), (ins m.vrclass:$rs2, ixlenimm:$sew), []>, RISCVVPseudo; - let VLIndex = 3, SEWIndex = 4, BaseInstr = VMV_S_X, + let VLIndex = 3, SEWIndex = 4, BaseInstr = VMV_S_X, WritesElement0 = 1, Constraints = "$rd = $rs1" in def PseudoVMV_S_X # "_" # m.MX: Pseudo<(outs m.vrclass:$rd), (ins m.vrclass:$rs1, GPR:$rs2, @@ -2502,7 +2505,7 @@ (ins m.vrclass:$rs2, ixlenimm:$sew), []>, RISCVVPseudo; - let VLIndex = 3, SEWIndex = 4, BaseInstr = VFMV_S_F, + let VLIndex = 3, SEWIndex = 4, BaseInstr = VFMV_S_F, WritesElement0 = 1, Constraints = "$rd = $rs1" in def PseudoVFMV_S_F # "_" # m.MX : Pseudo<(outs m.vrclass:$rd), (ins m.vrclass:$rs1, FPR32:$rs2, diff --git a/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h --- a/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h +++ b/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h @@ -378,6 +378,7 @@ uint8_t MergeOpIndex; uint8_t VLMul; bool HasDummyMask; + bool WritesElement0; int getVLIndex() const { return static_cast(VLIndex); } @@ -386,6 +387,8 @@ int getMergeOpIndex() const { return static_cast(MergeOpIndex); } bool hasDummyMask() const { return HasDummyMask; } + + bool writesElement0() const { return WritesElement0; } }; using namespace RISCV; diff --git a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vaadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vaadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vaadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, 
{{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vaadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vaadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vaadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vaadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vaadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vaadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vaadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vaadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vaadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vaadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vaadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vaadd_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv1i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vaadd_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv2i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vaadd_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv4i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vaadd_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv8i8.i8( %0, @@ -908,7 +908,7 @@ define @intrinsic_vaadd_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv16i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vaadd_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv32i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vaadd_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv64i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vaadd_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv1i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vaadd_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv2i16.i16( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vaadd_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv4i16.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vaadd_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv8i16.i16( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vaadd_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv16i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vaadd_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv32i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vaadd_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv1i32.i32( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vaadd_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv2i32.i32( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vaadd_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv4i32.i32( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vaadd_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv8i32.i32( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vaadd_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv16i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vaadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vaadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 
@@ define @intrinsic_vaadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vaadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vaadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vaadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vaadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vaadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vaadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vaadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call 
@llvm.riscv.vaadd.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vaadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vaadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vaadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vaadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vaadd_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64( %0, @@ -788,7 +788,7 @@ define @intrinsic_vaadd_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64( %0, @@ -828,7 +828,7 @@ define @intrinsic_vaadd_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e64,m4,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64( %0, @@ -868,7 +868,7 @@ define @intrinsic_vaadd_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64( %0, @@ -908,7 +908,7 @@ define @intrinsic_vaadd_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv1i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vaadd_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv2i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vaadd_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv4i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vaadd_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv8i8.i8( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vaadd_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv16i8.i8( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vaadd_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv32i8.i8( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vaadd_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv64i8.i8( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vaadd_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vaadd.vx 
{{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv1i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vaadd_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv2i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vaadd_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv4i16.i16( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vaadd_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv8i16.i16( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vaadd_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv16i16.i16( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vaadd_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv32i16.i16( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vaadd_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv1i32.i32( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vaadd_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv2i32.i32( %0, @@ -1508,7 +1508,7 @@ define @intrinsic_vaadd_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv4i32.i32( %0, @@ -1548,7 +1548,7 @@ define @intrinsic_vaadd_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e32,m4,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv8i32.i32( %0, @@ -1588,7 +1588,7 @@ define @intrinsic_vaadd_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv16i32.i32( %0, @@ -1628,7 +1628,7 @@ define @intrinsic_vaadd_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv1i64.i64( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vaadd_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv2i64.i64( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vaadd_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv4i64.i64( %0, @@ -1748,7 +1748,7 @@ define @intrinsic_vaadd_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaadd.mask.nxv8i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vaaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vaaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vaaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define 
@intrinsic_vaaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vaaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vaaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vaaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vaaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vaaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vaaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vaaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, 
{{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vaaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vaaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vaaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vaaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vaaddu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv1i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vaaddu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv2i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vaaddu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv4i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vaaddu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv8i8.i8( %0, @@ -908,7 +908,7 @@ define @intrinsic_vaaddu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv16i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vaaddu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv32i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vaaddu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv64i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vaaddu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv1i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vaaddu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv2i16.i16( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vaaddu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv4i16.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vaaddu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv8i16.i16( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vaaddu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv16i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vaaddu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv32i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vaaddu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv1i32.i32( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vaaddu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv2i32.i32( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vaaddu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv4i32.i32( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vaaddu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv8i32.i32( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vaaddu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv16i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vaaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vaaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call 
@llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vaaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vaaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vaaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vaaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vaaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vaaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vaaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vaaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e16,m2,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vaaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vaaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vaaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vaaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vaaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vaaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64( %0, @@ -788,7 +788,7 @@ define @intrinsic_vaaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vaaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64( %0, @@ -828,7 +828,7 @@ define @intrinsic_vaaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64( %0, @@ -868,7 +868,7 @@ define @intrinsic_vaaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64( %0, @@ -908,7 +908,7 @@ define @intrinsic_vaaddu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv1i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vaaddu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv2i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vaaddu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv4i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vaaddu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv8i8.i8( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vaaddu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv16i8.i8( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vaaddu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv32i8.i8( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vaaddu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; 
CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv64i8.i8( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vaaddu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv1i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vaaddu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv2i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vaaddu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv4i16.i16( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vaaddu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv8i16.i16( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vaaddu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv16i16.i16( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vaaddu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv32i16.i16( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vaaddu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv1i32.i32( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vaaddu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv2i32.i32( %0, @@ -1508,7 +1508,7 @@ define 
@intrinsic_vaaddu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv4i32.i32( %0, @@ -1548,7 +1548,7 @@ define @intrinsic_vaaddu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv8i32.i32( %0, @@ -1588,7 +1588,7 @@ define @intrinsic_vaaddu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv16i32.i32( %0, @@ -1628,7 +1628,7 @@ define @intrinsic_vaaddu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv1i64.i64( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vaaddu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv2i64.i64( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vaaddu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv4i64.i64( %0, @@ -1748,7 +1748,7 @@ define @intrinsic_vaaddu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vaaddu.mask.nxv8i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i8.i8( %0, @@ -908,7 +908,7 @@ define @intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv16i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv32i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv64i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i16.i16( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i16.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i16.i16( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv16i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv32i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i32.i32( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i32.i32( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i32.i32( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i32.i32( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv16i32.i32( %0, @@ -1456,7 +1456,7 @@ define @intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i8.i8( %0, @@ -1484,7 +1484,7 @@ define @intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i8.i8( %0, @@ -1512,7 +1512,7 @@ define @intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i8.i8( %0, @@ -1540,7 +1540,7 @@ define @intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i8.i8( %0, @@ -1568,7 +1568,7 @@ define @intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv16i8.i8( %0, @@ -1596,7 +1596,7 @@ define @intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv32i8.i8( %0, @@ -1624,7 +1624,7 @@ define @intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv64i8.i8( %0, @@ -1652,7 +1652,7 @@ define @intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i16.i16( %0, @@ -1680,7 +1680,7 @@ define @intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i16.i16( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i16.i16( %0, @@ -1736,7 +1736,7 @@ define @intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i16.i16( %0, @@ -1764,7 +1764,7 @@ define @intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16( 
%0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv16i16.i16( %0, @@ -1792,7 +1792,7 @@ define @intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv32i16.i16( %0, @@ -1820,7 +1820,7 @@ define @intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i32.i32( %0, @@ -1848,7 +1848,7 @@ define @intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i32.i32( %0, @@ -1876,7 +1876,7 @@ define @intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i32.i32( %0, @@ -1904,7 +1904,7 @@ define @intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i32.i32( %0, @@ -1932,7 +1932,7 @@ define @intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv16i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ 
define @intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16( %0, @@ -468,7 
+468,7 @@ define @intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64( %0, @@ -788,7 +788,7 @@ define @intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call 
@llvm.riscv.vadd.mask.nxv2i64.nxv2i64( %0, @@ -828,7 +828,7 @@ define @intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64( %0, @@ -868,7 +868,7 @@ define @intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64( %0, @@ -908,7 +908,7 @@ define @intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i8.i8( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv16i8.i8( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv32i8.i8( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv64i8.i8( %0, @@ -1188,7 +1188,7 @@ define 
@intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i16.i16( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i16.i16( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv16i16.i16( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv32i16.i16( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i32.i32( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i32.i32( %0, @@ -1508,7 +1508,7 @@ define @intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i32.i32( %0, @@ -1548,7 +1548,7 @@ define 
@intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i32.i32( %0, @@ -1588,7 +1588,7 @@ define @intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv16i32.i32( %0, @@ -1628,7 +1628,7 @@ define @intrinsic_vadd_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i64.i64( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vadd_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i64.i64( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vadd_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i64.i64( %0, @@ -1748,7 +1748,7 @@ define @intrinsic_vadd_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i64.i64( %0, @@ -1776,7 +1776,7 @@ define @intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i8.i8( %0, @@ -1804,7 +1804,7 @@ define @intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i8.i8( %0, @@ -1832,7 +1832,7 @@ define @intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i8.i8( %0, @@ -1860,7 +1860,7 @@ define @intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i8.i8( %0, @@ -1888,7 +1888,7 @@ define @intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vadd.mask.nxv16i8.i8( %0, @@ -1916,7 +1916,7 @@ define @intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vadd.mask.nxv32i8.i8( %0, @@ -1944,7 +1944,7 @@ define @intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vadd.mask.nxv64i8.i8( %0, @@ -1972,7 +1972,7 @@ define @intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i16.i16( %0, @@ -2000,7 +2000,7 @@ define @intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i16.i16( %0, @@ -2028,7 +2028,7 @@ define @intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i16.i16( %0, @@ -2056,7 +2056,7 @@ define @intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i16.i16( %0, @@ -2084,7 +2084,7 @@ define @intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vadd.mask.nxv16i16.i16( %0, @@ -2112,7 +2112,7 @@ define @intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t 
%a = call @llvm.riscv.vadd.mask.nxv32i16.i16( %0, @@ -2140,7 +2140,7 @@ define @intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i32.i32( %0, @@ -2168,7 +2168,7 @@ define @intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i32.i32( %0, @@ -2196,7 +2196,7 @@ define @intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i32.i32( %0, @@ -2224,7 +2224,7 @@ define @intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i32.i32( %0, @@ -2252,7 +2252,7 @@ define @intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vadd.mask.nxv16i32.i32( %0, @@ -2280,7 +2280,7 @@ define @intrinsic_vadd_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i64.i64( %0, @@ -2308,7 +2308,7 @@ define @intrinsic_vadd_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i64.i64( %0, @@ -2336,7 +2336,7 @@ define @intrinsic_vadd_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i64.i64( %0, @@ -2364,7 +2364,7 @@ define @intrinsic_vadd_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vand_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vand_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vand_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vand_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vand_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vand_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vand_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, 
{{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vand_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vand_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vand_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vand_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vand_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vand_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vand_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vand_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv1i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vand_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv2i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vand_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv4i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vand_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv8i8.i8( %0, @@ -908,7 +908,7 @@ define @intrinsic_vand_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv16i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vand_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv32i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vand_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv64i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vand_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv1i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vand_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t 
%a = call @llvm.riscv.vand.mask.nxv2i16.i16( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vand_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv4i16.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vand_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv8i16.i16( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vand_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv16i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vand_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv32i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vand_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv1i32.i32( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vand_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv2i32.i32( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vand_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv4i32.i32( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vand_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv8i32.i32( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vand_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call 
@llvm.riscv.vand.mask.nxv16i32.i32( %0, @@ -1456,7 +1456,7 @@ define @intrinsic_vand_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv1i8.i8( %0, @@ -1484,7 +1484,7 @@ define @intrinsic_vand_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv2i8.i8( %0, @@ -1512,7 +1512,7 @@ define @intrinsic_vand_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv4i8.i8( %0, @@ -1540,7 +1540,7 @@ define @intrinsic_vand_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv8i8.i8( %0, @@ -1568,7 +1568,7 @@ define @intrinsic_vand_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv16i8.i8( %0, @@ -1596,7 +1596,7 @@ define @intrinsic_vand_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv32i8.i8( %0, @@ -1624,7 +1624,7 @@ define @intrinsic_vand_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv64i8.i8( %0, @@ -1652,7 +1652,7 @@ define @intrinsic_vand_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv1i16.i16( %0, @@ -1680,7 +1680,7 @@ define @intrinsic_vand_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv2i16.i16( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vand_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv4i16.i16( %0, @@ -1736,7 +1736,7 @@ define @intrinsic_vand_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv8i16.i16( %0, @@ -1764,7 +1764,7 @@ define @intrinsic_vand_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv16i16.i16( %0, @@ -1792,7 +1792,7 @@ define @intrinsic_vand_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv32i16.i16( %0, @@ -1820,7 +1820,7 @@ define @intrinsic_vand_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv1i32.i32( %0, @@ -1848,7 +1848,7 @@ define @intrinsic_vand_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv2i32.i32( %0, @@ -1876,7 +1876,7 @@ define @intrinsic_vand_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv4i32.i32( %0, @@ -1904,7 +1904,7 @@ define @intrinsic_vand_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv8i32.i32( %0, @@ -1932,7 +1932,7 @@ define @intrinsic_vand_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv16i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vand_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vand_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vand_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vand_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vand_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vand_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vand_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vand_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vand_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vand_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vand_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vand_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vand_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vand_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vand_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vand_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, 
%1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64( %0, @@ -788,7 +788,7 @@ define @intrinsic_vand_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64( %0, @@ -828,7 +828,7 @@ define @intrinsic_vand_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64( %0, @@ -868,7 +868,7 @@ define @intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64( %0, @@ -908,7 +908,7 @@ define @intrinsic_vand_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv1i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vand_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv2i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vand_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv4i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vand_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv8i8.i8( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vand_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv16i8.i8( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vand_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vand_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv32i8.i8( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vand_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv64i8.i8( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vand_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv1i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vand_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv2i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vand_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv4i16.i16( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vand_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv8i16.i16( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vand_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv16i16.i16( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vand_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv32i16.i16( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vand_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv1i32.i32( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vand_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vand_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv2i32.i32( %0, @@ -1508,7 +1508,7 @@ define @intrinsic_vand_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv4i32.i32( %0, @@ -1548,7 +1548,7 @@ define @intrinsic_vand_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv8i32.i32( %0, @@ -1588,7 +1588,7 @@ define @intrinsic_vand_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv16i32.i32( %0, @@ -1628,7 +1628,7 @@ define @intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv1i64.i64( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv2i64.i64( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv4i64.i64( %0, @@ -1748,7 +1748,7 @@ define @intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vand.mask.nxv8i64.i64( %0, @@ -1776,7 +1776,7 @@ define @intrinsic_vand_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv1i8.i8( %0, @@ -1804,7 +1804,7 @@ define @intrinsic_vand_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv2i8.i8( %0, @@ -1832,7 +1832,7 @@ define @intrinsic_vand_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv4i8.i8( %0, @@ -1860,7 +1860,7 @@ define @intrinsic_vand_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv8i8.i8( %0, @@ -1888,7 +1888,7 @@ define @intrinsic_vand_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv16i8.i8( %0, @@ -1916,7 +1916,7 @@ define @intrinsic_vand_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv32i8.i8( %0, @@ -1944,7 +1944,7 @@ define @intrinsic_vand_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv64i8.i8( %0, @@ -1972,7 +1972,7 @@ define @intrinsic_vand_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv1i16.i16( %0, @@ -2000,7 +2000,7 @@ define @intrinsic_vand_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv2i16.i16( %0, @@ -2028,7 +2028,7 @@ define @intrinsic_vand_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv4i16.i16( %0, @@ -2056,7 +2056,7 @@ define @intrinsic_vand_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv8i16.i16( %0, @@ -2084,7 +2084,7 @@ define 
@intrinsic_vand_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv16i16.i16( %0, @@ -2112,7 +2112,7 @@ define @intrinsic_vand_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv32i16.i16( %0, @@ -2140,7 +2140,7 @@ define @intrinsic_vand_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv1i32.i32( %0, @@ -2168,7 +2168,7 @@ define @intrinsic_vand_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv2i32.i32( %0, @@ -2196,7 +2196,7 @@ define @intrinsic_vand_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv4i32.i32( %0, @@ -2224,7 +2224,7 @@ define @intrinsic_vand_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv8i32.i32( %0, @@ -2252,7 +2252,7 @@ define @intrinsic_vand_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv16i32.i32( %0, @@ -2280,7 +2280,7 @@ define @intrinsic_vand_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv1i64.i64( %0, @@ -2308,7 +2308,7 @@ define @intrinsic_vand_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv2i64.i64( %0, @@ -2336,7 +2336,7 @@ define @intrinsic_vand_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv4i64.i64( %0, @@ -2364,7 +2364,7 @@ define @intrinsic_vand_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vand.mask.nxv8i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vasub_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vasub_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vasub_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vasub_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vasub_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vasub_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define 
@intrinsic_vasub_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vasub_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vasub_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vasub_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vasub_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vasub_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vasub_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vasub_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, 
v0.t %a = call @llvm.riscv.vasub.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vasub_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vasub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv1i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vasub_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv2i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vasub_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv4i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vasub_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv8i8.i8( %0, @@ -908,7 +908,7 @@ define @intrinsic_vasub_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv16i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vasub_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv32i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vasub_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call 
@llvm.riscv.vasub.mask.nxv64i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vasub_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv1i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vasub_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv2i16.i16( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vasub_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv4i16.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vasub_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv8i16.i16( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vasub_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv16i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vasub_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv32i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vasub_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv1i32.i32( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vasub_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv2i32.i32( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vasub_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, 
{{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv4i32.i32( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vasub_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv8i32.i32( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vasub_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv16i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vasub_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vasub_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vasub_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vasub_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vasub_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vasub_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; 
CHECK-LABEL: intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vasub_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vasub_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vasub_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vasub_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vasub_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vasub_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vasub_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define 
@intrinsic_vasub_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vasub_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vasub_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv1i64.nxv1i64( %0, @@ -788,7 +788,7 @@ define @intrinsic_vasub_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv2i64.nxv2i64( %0, @@ -828,7 +828,7 @@ define @intrinsic_vasub_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv4i64.nxv4i64( %0, @@ -868,7 +868,7 @@ define @intrinsic_vasub_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv8i64.nxv8i64( %0, @@ -908,7 +908,7 @@ define @intrinsic_vasub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv1i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vasub_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call 
@llvm.riscv.vasub.mask.nxv2i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vasub_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv4i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vasub_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv8i8.i8( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vasub_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv16i8.i8( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vasub_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv32i8.i8( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vasub_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv64i8.i8( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vasub_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv1i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vasub_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv2i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vasub_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv4i16.i16( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vasub_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call 
@llvm.riscv.vasub.mask.nxv8i16.i16( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vasub_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv16i16.i16( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vasub_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv32i16.i16( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vasub_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv1i32.i32( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vasub_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv2i32.i32( %0, @@ -1508,7 +1508,7 @@ define @intrinsic_vasub_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv4i32.i32( %0, @@ -1548,7 +1548,7 @@ define @intrinsic_vasub_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv8i32.i32( %0, @@ -1588,7 +1588,7 @@ define @intrinsic_vasub_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv16i32.i32( %0, @@ -1628,7 +1628,7 @@ define @intrinsic_vasub_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv1i64.i64( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vasub_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, 
{{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv2i64.i64( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vasub_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv4i64.i64( %0, @@ -1748,7 +1748,7 @@ define @intrinsic_vasub_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasub.mask.nxv8i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vasubu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vasubu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vasubu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vasubu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vasubu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vasubu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) 
nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vasubu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vasubu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vasubu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vasubu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vasubu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vasubu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vasubu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call 
@llvm.riscv.vasubu.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vasubu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vasubu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vasubu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv1i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vasubu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv2i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vasubu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv4i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vasubu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv8i8.i8( %0, @@ -908,7 +908,7 @@ define @intrinsic_vasubu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv16i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vasubu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, 
v0.t %a = call @llvm.riscv.vasubu.mask.nxv32i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vasubu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv64i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vasubu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv1i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vasubu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv2i16.i16( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vasubu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv4i16.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vasubu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv8i16.i16( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vasubu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv16i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vasubu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv32i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vasubu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv1i32.i32( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vasubu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; 
CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv2i32.i32( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vasubu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv4i32.i32( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vasubu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv8i32.i32( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vasubu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv16i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vasubu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vasubu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vasubu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vasubu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vasubu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define 
@intrinsic_vasubu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vasubu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vasubu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vasubu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vasubu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vasubu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vasubu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vasubu.vv 
{{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vasubu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vasubu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vasubu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vasubu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64( %0, @@ -788,7 +788,7 @@ define @intrinsic_vasubu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64( %0, @@ -828,7 +828,7 @@ define @intrinsic_vasubu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64( %0, @@ -868,7 +868,7 @@ define @intrinsic_vasubu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64( %0, @@ -908,7 +908,7 @@ define @intrinsic_vasubu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv1i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vasubu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv2i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vasubu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv4i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vasubu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv8i8.i8( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vasubu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv16i8.i8( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vasubu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv32i8.i8( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vasubu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv64i8.i8( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vasubu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv1i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vasubu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv2i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vasubu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv4i16.i16( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vasubu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv8i16.i16( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vasubu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv16i16.i16( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vasubu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv32i16.i16( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vasubu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv1i32.i32( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vasubu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv2i32.i32( %0, @@ -1508,7 +1508,7 @@ define @intrinsic_vasubu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv4i32.i32( %0, @@ -1548,7 +1548,7 @@ define @intrinsic_vasubu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv8i32.i32( %0, @@ -1588,7 +1588,7 @@ define @intrinsic_vasubu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv16i32.i32( %0, @@ -1628,7 +1628,7 @@ define @intrinsic_vasubu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vasubu_mask_vx_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv1i64.i64( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vasubu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv2i64.i64( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vasubu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv4i64.i64( %0, @@ -1748,7 +1748,7 @@ define @intrinsic_vasubu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vasubu.mask.nxv8i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a 
= call @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vdiv.vv 
{{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv1i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv2i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv4i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv8i8.i8( %0, @@ -908,7 +908,7 @@ define @intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t 
%a = call @llvm.riscv.vdiv.mask.nxv16i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv32i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv64i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv1i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv2i16.i16( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv4i16.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv8i16.i16( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv16i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv32i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call 
@llvm.riscv.vdiv.mask.nxv1i32.i32( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv2i32.i32( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv4i32.i32( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv8i32.i32( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv16i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vdiv_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64( %0, @@ -788,7 +788,7 @@ define @intrinsic_vdiv_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64( %0, @@ -828,7 +828,7 @@ define @intrinsic_vdiv_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64( %0, @@ -868,7 +868,7 @@ define @intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64( %0, @@ -908,7 +908,7 @@ define @intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) 
nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv1i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv2i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv4i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv8i8.i8( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv16i8.i8( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv32i8.i8( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv64i8.i8( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv1i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv2i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16 -; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv4i16.i16( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv8i16.i16( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv16i16.i16( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv32i16.i16( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv1i32.i32( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv2i32.i32( %0, @@ -1508,7 +1508,7 @@ define @intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv4i32.i32( %0, @@ -1548,7 +1548,7 @@ define @intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv8i32.i32( %0, @@ -1588,7 +1588,7 @@ define @intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv16i32.i32( %0, @@ -1628,7 +1628,7 @@ define @intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64 -; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv1i64.i64( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv2i64.i64( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv4i64.i64( %0, @@ -1748,7 +1748,7 @@ define @intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdiv.mask.nxv8i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8( %0, @@ -228,7 
+228,7 @@ define @intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, 
{{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv1i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv2i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv4i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv8i8.i8( %0, @@ -908,7 +908,7 @@ define @intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, 
{{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv16i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv32i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv64i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv1i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv2i16.i16( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv4i16.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv8i16.i16( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv16i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv32i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, 
{{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv1i32.i32( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv2i32.i32( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv4i32.i32( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv8i32.i32( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv16i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: 
; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define 
@intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vdivu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64( %0, @@ -788,7 +788,7 @@ define @intrinsic_vdivu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64( %0, @@ -828,7 +828,7 @@ define @intrinsic_vdivu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64( %0, @@ -868,7 +868,7 @@ define @intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = 
call @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64( %0, @@ -908,7 +908,7 @@ define @intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv1i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv2i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv4i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv8i8.i8( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv16i8.i8( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv32i8.i8( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv64i8.i8( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv1i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv2i16.i16( %0, 
@@ -1268,7 +1268,7 @@ define @intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv4i16.i16( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv8i16.i16( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv16i16.i16( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv32i16.i16( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv1i32.i32( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv2i32.i32( %0, @@ -1508,7 +1508,7 @@ define @intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv4i32.i32( %0, @@ -1548,7 +1548,7 @@ define @intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv8i32.i32( %0, @@ -1588,7 +1588,7 @@ define @intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call 
@llvm.riscv.vdivu.mask.nxv16i32.i32( %0, @@ -1628,7 +1628,7 @@ define @intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv1i64.i64( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv2i64.i64( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv4i64.i64( %0, @@ -1748,7 +1748,7 @@ define @intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vdivu.mask.nxv8i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll @@ -29,7 +29,7 @@ define @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16( %0, @@ -69,7 +69,7 @@ define @intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16( %0, @@ -109,7 +109,7 @@ define @intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16( %0, @@ -149,7 +149,7 @@ define @intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16( %0, @@ -189,7 +189,7 @@ define @intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; 
CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16( %0, @@ -229,7 +229,7 @@ define @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16( %0, @@ -269,7 +269,7 @@ define @intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32( %0, @@ -309,7 +309,7 @@ define @intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32( %0, @@ -349,7 +349,7 @@ define @intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32( %0, @@ -389,7 +389,7 @@ define @intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32( %0, @@ -429,7 +429,7 @@ define @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32( %0, @@ -469,7 +469,7 @@ define @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv1f16.f16( %0, @@ -509,7 +509,7 @@ define @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv2f16.f16( %0, @@ -549,7 +549,7 @@ define 
@intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv4f16.f16( %0, @@ -589,7 +589,7 @@ define @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv8f16.f16( %0, @@ -629,7 +629,7 @@ define @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv16f16.f16( %0, @@ -669,7 +669,7 @@ define @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv32f16.f16( %0, @@ -709,7 +709,7 @@ define @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv1f32.f32( %0, @@ -749,7 +749,7 @@ define @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv2f32.f32( %0, @@ -789,7 +789,7 @@ define @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv4f32.f32( %0, @@ -829,7 +829,7 @@ define @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv8f32.f32( %0, @@ -869,7 +869,7 @@ define @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv16f32.f32( 
%0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll @@ -29,7 +29,7 @@ define @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16( %0, @@ -69,7 +69,7 @@ define @intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16( %0, @@ -109,7 +109,7 @@ define @intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16( %0, @@ -149,7 +149,7 @@ define @intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16( %0, @@ -189,7 +189,7 @@ define @intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16( %0, @@ -229,7 +229,7 @@ define @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16( %0, @@ -269,7 +269,7 @@ define @intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32( %0, @@ -309,7 +309,7 @@ define @intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32( %0, @@ -349,7 +349,7 @@ define @intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; 
CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32( %0, @@ -389,7 +389,7 @@ define @intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32( %0, @@ -429,7 +429,7 @@ define @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32( %0, @@ -469,7 +469,7 @@ define @intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64( %0, @@ -509,7 +509,7 @@ define @intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64( %0, @@ -549,7 +549,7 @@ define @intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64( %0, @@ -589,7 +589,7 @@ define @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64( %0, @@ -629,7 +629,7 @@ define @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv1f16.f16( %0, @@ -669,7 +669,7 @@ define @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv2f16.f16( %0, @@ -709,7 +709,7 @@ define 
@intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv4f16.f16( %0, @@ -749,7 +749,7 @@ define @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv8f16.f16( %0, @@ -789,7 +789,7 @@ define @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv16f16.f16( %0, @@ -829,7 +829,7 @@ define @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv32f16.f16( %0, @@ -869,7 +869,7 @@ define @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv1f32.f32( %0, @@ -909,7 +909,7 @@ define @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv2f32.f32( %0, @@ -949,7 +949,7 @@ define @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv4f32.f32( %0, @@ -989,7 +989,7 @@ define @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv8f32.f32( %0, @@ -1029,7 +1029,7 @@ define @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call 
@llvm.riscv.vfadd.mask.nxv16f32.f32( %0, @@ -1069,7 +1069,7 @@ define @intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv1f64.f64( %0, @@ -1109,7 +1109,7 @@ define @intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv2f64.f64( %0, @@ -1149,7 +1149,7 @@ define @intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv4f64.f64( %0, @@ -1189,7 +1189,7 @@ define @intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfadd.mask.nxv8f64.f64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv1f16( %0, @@ -68,7 +68,7 @@ define @intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv2f16( %0, @@ -108,7 +108,7 @@ define @intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv4f16( %0, @@ -148,7 +148,7 @@ define @intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv8f16( %0, @@ -188,7 +188,7 @@ define @intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv16f16( %0, @@ -228,7 +228,7 @@ define @intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv32f16( %0, @@ -268,7 +268,7 @@ define @intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv1f32( %0, @@ -308,7 +308,7 @@ define @intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv2f32( %0, @@ -348,7 +348,7 @@ define @intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv4f32( %0, @@ -388,7 +388,7 @@ define @intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv8f32( %0, @@ -428,7 +428,7 @@ define @intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv16f32( %0, @@ -468,7 +468,7 @@ define @intrinsic_vfdiv_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv1f16.f16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vfdiv_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv2f16.f16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vfdiv_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, 
{{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv4f16.f16( %0, @@ -588,7 +588,7 @@ define @intrinsic_vfdiv_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv8f16.f16( %0, @@ -628,7 +628,7 @@ define @intrinsic_vfdiv_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv16f16.f16( %0, @@ -668,7 +668,7 @@ define @intrinsic_vfdiv_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv32f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv32f16.f16( %0, @@ -708,7 +708,7 @@ define @intrinsic_vfdiv_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv1f32.f32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vfdiv_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv2f32.f32( %0, @@ -788,7 +788,7 @@ define @intrinsic_vfdiv_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv4f32.f32( %0, @@ -828,7 +828,7 @@ define @intrinsic_vfdiv_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv8f32.f32( %0, @@ -868,7 +868,7 @@ define @intrinsic_vfdiv_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv16f32.f32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv1f16( %0, @@ -68,7 +68,7 @@ define @intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv2f16( %0, @@ -108,7 +108,7 @@ define @intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv4f16( %0, @@ -148,7 +148,7 @@ define @intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv8f16( %0, @@ -188,7 +188,7 @@ define @intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv16f16( %0, @@ -228,7 +228,7 @@ define @intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv32f16( %0, @@ -268,7 +268,7 @@ define @intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv1f32( %0, @@ -308,7 +308,7 @@ define @intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv2f32( %0, @@ -348,7 +348,7 @@ define @intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv4f32( %0, @@ -388,7 +388,7 @@ define @intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv8f32( 
%0, @@ -428,7 +428,7 @@ define @intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv16f32( %0, @@ -468,7 +468,7 @@ define @intrinsic_vfdiv_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f64_nxv1f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv1f64( %0, @@ -508,7 +508,7 @@ define @intrinsic_vfdiv_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f64_nxv2f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv2f64( %0, @@ -548,7 +548,7 @@ define @intrinsic_vfdiv_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f64_nxv4f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv4f64( %0, @@ -588,7 +588,7 @@ define @intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv8f64( %0, @@ -628,7 +628,7 @@ define @intrinsic_vfdiv_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv1f16.f16( %0, @@ -668,7 +668,7 @@ define @intrinsic_vfdiv_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv2f16.f16( %0, @@ -708,7 +708,7 @@ define @intrinsic_vfdiv_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv4f16.f16( %0, @@ -748,7 +748,7 @@ define @intrinsic_vfdiv_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv8f16.f16( %0, @@ -788,7 +788,7 @@ define @intrinsic_vfdiv_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vfdiv_mask_vf_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv16f16.f16( %0, @@ -828,7 +828,7 @@ define @intrinsic_vfdiv_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv32f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv32f16.f16( %0, @@ -868,7 +868,7 @@ define @intrinsic_vfdiv_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv1f32.f32( %0, @@ -908,7 +908,7 @@ define @intrinsic_vfdiv_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv2f32.f32( %0, @@ -948,7 +948,7 @@ define @intrinsic_vfdiv_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv4f32.f32( %0, @@ -988,7 +988,7 @@ define @intrinsic_vfdiv_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv8f32.f32( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vfdiv_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv16f32.f32( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vfdiv_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv1f64.f64( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vfdiv_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv2f64.f64( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vfdiv_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv4f64.f64( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vfdiv_mask_vf_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfdiv.mask.nxv8f64.f64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -33,7 +33,7 @@ define @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -56,7 +56,7 @@ define @intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -79,7 +79,7 @@ define @intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -102,7 +102,7 @@ define @intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -125,7 +125,7 @@ define @intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -148,7 +148,7 @@ define @intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -171,7 +171,7 @@ define @intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, 
e16,m2,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -196,7 +196,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -221,7 +221,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -244,7 +244,7 @@ define @intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -267,7 +267,7 @@ define @intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -290,7 +290,7 @@ define @intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -313,7 +313,7 @@ define @intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -336,7 +336,7 @@ define @intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -359,7 +359,7 @@ define @intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -384,7 +384,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -409,7 +409,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -433,7 +433,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # 
%entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -457,7 +457,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -481,7 +481,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -505,7 +505,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -529,7 +529,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -553,7 +553,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -577,7 +577,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -601,7 +601,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -625,7 +625,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -649,7 +649,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -673,7 +673,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -697,7 +697,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ 
-721,7 +721,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -745,7 +745,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -769,7 +769,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -793,7 +793,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -817,7 +817,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -841,7 +841,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -33,7 +33,7 @@ define @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -56,7 +56,7 @@ define @intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -79,7 +79,7 @@ define @intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -102,7 +102,7 @@ define @intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, 
%2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -125,7 +125,7 @@ define @intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -148,7 +148,7 @@ define @intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -171,7 +171,7 @@ define @intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -196,7 +196,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -221,7 +221,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -244,7 +244,7 @@ define @intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -267,7 +267,7 @@ define @intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -290,7 +290,7 @@ define @intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -313,7 +313,7 @@ define @intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -336,7 +336,7 @@ define @intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -359,7 +359,7 @@ define @intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -384,7 +384,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -409,7 +409,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -432,7 +432,7 @@ define @intrinsic_vfmacc_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -455,7 +455,7 @@ define @intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -478,7 +478,7 @@ define @intrinsic_vfmacc_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -501,7 +501,7 @@ define @intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -526,7 +526,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu ; CHECK-NEXT: vle64.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -551,7 +551,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu ; CHECK-NEXT: vle64.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vfmacc.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -575,7 +575,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -599,7 +599,7 @@ 
; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -623,7 +623,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -647,7 +647,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -671,7 +671,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -695,7 +695,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -719,7 +719,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -743,7 +743,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -767,7 +767,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -791,7 +791,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -815,7 +815,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -839,7 +839,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -863,7 +863,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; 
CHECK-NEXT: vfmacc.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -887,7 +887,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -911,7 +911,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -935,7 +935,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -959,7 +959,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -983,7 +983,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1007,7 +1007,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m1,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1031,7 +1031,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m1,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1055,7 +1055,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m2,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1079,7 +1079,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m2,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1103,7 +1103,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1127,7 +1127,7 @@ ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vfmacc.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -33,7 +33,7 @@ define @intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -56,7 +56,7 @@ define @intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -79,7 +79,7 @@ define @intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -102,7 +102,7 @@ define @intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -125,7 +125,7 @@ define @intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -148,7 +148,7 @@ define @intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -171,7 +171,7 @@ define @intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -196,7 +196,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -221,7 +221,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v20, v28, 
v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -244,7 +244,7 @@ define @intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -267,7 +267,7 @@ define @intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -290,7 +290,7 @@ define @intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -313,7 +313,7 @@ define @intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -336,7 +336,7 @@ define @intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -359,7 +359,7 @@ define @intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -384,7 +384,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -409,7 +409,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -433,7 +433,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -457,7 +457,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -481,7 +481,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x 
ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -505,7 +505,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -529,7 +529,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -553,7 +553,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -577,7 +577,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -601,7 +601,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -625,7 +625,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -649,7 +649,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -673,7 +673,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -697,7 +697,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -721,7 +721,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -745,7 +745,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -769,7 +769,7 @@ ; CHECK-LABEL: 
intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -793,7 +793,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -817,7 +817,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -841,7 +841,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -33,7 +33,7 @@ define @intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -56,7 +56,7 @@ define @intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -79,7 +79,7 @@ define @intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -102,7 +102,7 @@ define @intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -125,7 +125,7 @@ define @intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 
0(ra) entry: @@ -148,7 +148,7 @@ define @intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -171,7 +171,7 @@ define @intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -196,7 +196,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -221,7 +221,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -244,7 +244,7 @@ define @intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -267,7 +267,7 @@ define @intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -290,7 +290,7 @@ define @intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -313,7 +313,7 @@ define @intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -336,7 +336,7 @@ define @intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -359,7 +359,7 @@ define @intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -384,7 +384,7 @@ ; CHECK: # 
%bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -409,7 +409,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -432,7 +432,7 @@ define @intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -455,7 +455,7 @@ define @intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -478,7 +478,7 @@ define @intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -501,7 +501,7 @@ define @intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -526,7 +526,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu ; CHECK-NEXT: vle64.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -551,7 +551,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu ; CHECK-NEXT: vle64.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vfmadd.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -575,7 +575,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -599,7 +599,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -623,7 +623,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -647,7 +647,7 @@ ; CHECK-LABEL: 
intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -671,7 +671,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -695,7 +695,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -719,7 +719,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -743,7 +743,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -767,7 +767,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -791,7 +791,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -815,7 +815,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -839,7 +839,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -863,7 +863,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -887,7 +887,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -911,7 +911,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfmadd.vf 
v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -935,7 +935,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -959,7 +959,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -983,7 +983,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1007,7 +1007,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m1,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1031,7 +1031,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m1,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1055,7 +1055,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m2,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1079,7 +1079,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m2,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1103,7 +1103,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1127,7 +1127,7 @@ ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vfmadd.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16( %0, @@ -68,7 +68,7 @@ define @intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16( %0, @@ -108,7 +108,7 @@ define @intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16( %0, @@ -148,7 +148,7 @@ define @intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16( %0, @@ -188,7 +188,7 @@ define @intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16( %0, @@ -228,7 +228,7 @@ define @intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16( %0, @@ -268,7 +268,7 @@ define @intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32( %0, @@ -308,7 +308,7 @@ define @intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32( %0, @@ -348,7 +348,7 @@ define @intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32( %0, @@ -388,7 +388,7 @@ define @intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32( %0, @@ -428,7 +428,7 @@ define 
@intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32( %0, @@ -468,7 +468,7 @@ define @intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv1f16.f16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv2f16.f16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv4f16.f16( %0, @@ -588,7 +588,7 @@ define @intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv8f16.f16( %0, @@ -628,7 +628,7 @@ define @intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv16f16.f16( %0, @@ -668,7 +668,7 @@ define @intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv32f16.f16( %0, @@ -708,7 +708,7 @@ define @intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv1f32.f32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a 
= call @llvm.riscv.vfmax.mask.nxv2f32.f32( %0, @@ -788,7 +788,7 @@ define @intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv4f32.f32( %0, @@ -828,7 +828,7 @@ define @intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv8f32.f32( %0, @@ -868,7 +868,7 @@ define @intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv16f32.f32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16( %0, @@ -68,7 +68,7 @@ define @intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16( %0, @@ -108,7 +108,7 @@ define @intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16( %0, @@ -148,7 +148,7 @@ define @intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16( %0, @@ -188,7 +188,7 @@ define @intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16( %0, @@ -228,7 +228,7 @@ define @intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, 
%1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16( %0, @@ -268,7 +268,7 @@ define @intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32( %0, @@ -308,7 +308,7 @@ define @intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32( %0, @@ -348,7 +348,7 @@ define @intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32( %0, @@ -388,7 +388,7 @@ define @intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32( %0, @@ -428,7 +428,7 @@ define @intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32( %0, @@ -468,7 +468,7 @@ define @intrinsic_vfmax_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f64_nxv1f64_nxv1f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64( %0, @@ -508,7 +508,7 @@ define @intrinsic_vfmax_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f64_nxv2f64_nxv2f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64( %0, @@ -548,7 +548,7 @@ define @intrinsic_vfmax_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f64_nxv4f64_nxv4f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64( %0, @@ 
-588,7 +588,7 @@ define @intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64( %0, @@ -628,7 +628,7 @@ define @intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv1f16.f16( %0, @@ -668,7 +668,7 @@ define @intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv2f16.f16( %0, @@ -708,7 +708,7 @@ define @intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv4f16.f16( %0, @@ -748,7 +748,7 @@ define @intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv8f16.f16( %0, @@ -788,7 +788,7 @@ define @intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv16f16.f16( %0, @@ -828,7 +828,7 @@ define @intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv32f16.f16( %0, @@ -868,7 +868,7 @@ define @intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv1f32.f32( %0, @@ -908,7 +908,7 @@ define @intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, 
{{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv2f32.f32( %0, @@ -948,7 +948,7 @@ define @intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv4f32.f32( %0, @@ -988,7 +988,7 @@ define @intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv8f32.f32( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv16f32.f32( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv1f64.f64( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv2f64.f64( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv4f64.f64( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmax.mask.nxv8f64.f64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16( %0, @@ -68,7 +68,7 @@ define 
@intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16( %0, @@ -108,7 +108,7 @@ define @intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16( %0, @@ -148,7 +148,7 @@ define @intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16( %0, @@ -188,7 +188,7 @@ define @intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16( %0, @@ -228,7 +228,7 @@ define @intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16( %0, @@ -268,7 +268,7 @@ define @intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32( %0, @@ -308,7 +308,7 @@ define @intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32( %0, @@ -348,7 +348,7 @@ define @intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32( %0, @@ -388,7 +388,7 @@ define @intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, 
v0.t %a = call @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32( %0, @@ -428,7 +428,7 @@ define @intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32( %0, @@ -468,7 +468,7 @@ define @intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv1f16.f16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv2f16.f16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv4f16.f16( %0, @@ -588,7 +588,7 @@ define @intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv8f16.f16( %0, @@ -628,7 +628,7 @@ define @intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv16f16.f16( %0, @@ -668,7 +668,7 @@ define @intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv32f16.f16( %0, @@ -708,7 +708,7 @@ define @intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv1f32.f32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv2f32.f32( %0, @@ -788,7 +788,7 @@ define @intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv4f32.f32( %0, @@ -828,7 +828,7 @@ define @intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv8f32.f32( %0, @@ -868,7 +868,7 @@ define @intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv16f32.f32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16( %0, @@ -68,7 +68,7 @@ define @intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16( %0, @@ -108,7 +108,7 @@ define @intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16( %0, @@ -148,7 +148,7 @@ define @intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16( %0, @@ -188,7 +188,7 @@ define @intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call 
@llvm.riscv.vfmin.mask.nxv16f16.nxv16f16( %0, @@ -228,7 +228,7 @@ define @intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16( %0, @@ -268,7 +268,7 @@ define @intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32( %0, @@ -308,7 +308,7 @@ define @intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32( %0, @@ -348,7 +348,7 @@ define @intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32( %0, @@ -388,7 +388,7 @@ define @intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32( %0, @@ -428,7 +428,7 @@ define @intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32( %0, @@ -468,7 +468,7 @@ define @intrinsic_vfmin_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f64_nxv1f64_nxv1f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64( %0, @@ -508,7 +508,7 @@ define @intrinsic_vfmin_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f64_nxv2f64_nxv2f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64( %0, @@ -548,7 +548,7 @@ define @intrinsic_vfmin_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f64_nxv4f64_nxv4f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e64,m4,tu,mu ; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64( %0, @@ -588,7 +588,7 @@ define @intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64( %0, @@ -628,7 +628,7 @@ define @intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv1f16.f16( %0, @@ -668,7 +668,7 @@ define @intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv2f16.f16( %0, @@ -708,7 +708,7 @@ define @intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv4f16.f16( %0, @@ -748,7 +748,7 @@ define @intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv8f16.f16( %0, @@ -788,7 +788,7 @@ define @intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv16f16.f16( %0, @@ -828,7 +828,7 @@ define @intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv32f16.f16( %0, @@ -868,7 +868,7 @@ define @intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv1f32.f32( %0, @@ -908,7 +908,7 @@ define @intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv2f32.f32( %0, @@ -948,7 +948,7 @@ define @intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv4f32.f32( %0, @@ -988,7 +988,7 @@ define @intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv8f32.f32( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv16f32.f32( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv1f64.f64( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv2f64.f64( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv4f64.f64( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t %a = call @llvm.riscv.vfmin.mask.nxv8f64.f64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v17, v18 ; CHECK-NEXT: 
jalr zero, 0(ra) entry: @@ -33,7 +33,7 @@ define @intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -56,7 +56,7 @@ define @intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -79,7 +79,7 @@ define @intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -102,7 +102,7 @@ define @intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -125,7 +125,7 @@ define @intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -148,7 +148,7 @@ define @intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -171,7 +171,7 @@ define @intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -196,7 +196,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -221,7 +221,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -244,7 +244,7 @@ define @intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -267,7 +267,7 @@ define 
@intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -290,7 +290,7 @@ define @intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -313,7 +313,7 @@ define @intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -336,7 +336,7 @@ define @intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -359,7 +359,7 @@ define @intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -384,7 +384,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -409,7 +409,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -433,7 +433,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -457,7 +457,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -481,7 +481,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -505,7 +505,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfmsac.vf 
v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -529,7 +529,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -553,7 +553,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -577,7 +577,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -601,7 +601,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -625,7 +625,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -649,7 +649,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -673,7 +673,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -697,7 +697,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -721,7 +721,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -745,7 +745,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -769,7 +769,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -793,7 +793,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, 
a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -817,7 +817,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -841,7 +841,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -33,7 +33,7 @@ define @intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -56,7 +56,7 @@ define @intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -79,7 +79,7 @@ define @intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -102,7 +102,7 @@ define @intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -125,7 +125,7 @@ define @intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -148,7 +148,7 @@ define @intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -171,7 +171,7 @@ define 
@intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -196,7 +196,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -221,7 +221,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -244,7 +244,7 @@ define @intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -267,7 +267,7 @@ define @intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -290,7 +290,7 @@ define @intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -313,7 +313,7 @@ define @intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -336,7 +336,7 @@ define @intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -359,7 +359,7 @@ define @intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -384,7 +384,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -409,7 +409,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) -; CHECK-NEXT: 
vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -432,7 +432,7 @@ define @intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -455,7 +455,7 @@ define @intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -478,7 +478,7 @@ define @intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -501,7 +501,7 @@ define @intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -526,7 +526,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu ; CHECK-NEXT: vle64.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -551,7 +551,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu ; CHECK-NEXT: vle64.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vfmsac.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -575,7 +575,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -599,7 +599,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -623,7 +623,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -647,7 +647,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -671,7 +671,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; 
CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -695,7 +695,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -719,7 +719,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -743,7 +743,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -767,7 +767,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -791,7 +791,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -815,7 +815,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -839,7 +839,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -863,7 +863,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -887,7 +887,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -911,7 +911,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -935,7 +935,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -959,7 +959,7 @@ ; CHECK-LABEL: 
intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -983,7 +983,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1007,7 +1007,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m1,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1031,7 +1031,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m1,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1055,7 +1055,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m2,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1079,7 +1079,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m2,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1103,7 +1103,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1127,7 +1127,7 @@ ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vfmsac.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -33,7 +33,7 @@ define @intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -56,7 +56,7 @@ define @intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, 
e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -79,7 +79,7 @@ define @intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -102,7 +102,7 @@ define @intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -125,7 +125,7 @@ define @intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -148,7 +148,7 @@ define @intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -171,7 +171,7 @@ define @intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -196,7 +196,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -221,7 +221,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -244,7 +244,7 @@ define @intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -267,7 +267,7 @@ define @intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -290,7 +290,7 @@ define @intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, 
e32,m1,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -313,7 +313,7 @@ define @intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -336,7 +336,7 @@ define @intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -359,7 +359,7 @@ define @intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -384,7 +384,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -409,7 +409,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -433,7 +433,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -457,7 +457,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -481,7 +481,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -505,7 +505,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -529,7 +529,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -553,7 +553,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, 
v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -577,7 +577,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -601,7 +601,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -625,7 +625,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -649,7 +649,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -673,7 +673,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -697,7 +697,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -721,7 +721,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -745,7 +745,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -769,7 +769,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -793,7 +793,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -817,7 +817,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -841,7 +841,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, 
e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -33,7 +33,7 @@ define @intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -56,7 +56,7 @@ define @intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -79,7 +79,7 @@ define @intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -102,7 +102,7 @@ define @intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -125,7 +125,7 @@ define @intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -148,7 +148,7 @@ define @intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -171,7 +171,7 @@ define @intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -196,7 +196,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ 
-221,7 +221,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -244,7 +244,7 @@ define @intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -267,7 +267,7 @@ define @intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -290,7 +290,7 @@ define @intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -313,7 +313,7 @@ define @intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -336,7 +336,7 @@ define @intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -359,7 +359,7 @@ define @intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -384,7 +384,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -409,7 +409,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -432,7 +432,7 @@ define @intrinsic_vfmsub_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -455,7 +455,7 @@ define @intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -478,7 +478,7 @@ define @intrinsic_vfmsub_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -501,7 +501,7 @@ define @intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -526,7 +526,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu ; CHECK-NEXT: vle64.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -551,7 +551,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu ; CHECK-NEXT: vle64.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -575,7 +575,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -599,7 +599,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -623,7 +623,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -647,7 +647,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -671,7 +671,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -695,7 +695,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -719,7 +719,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, 
e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -743,7 +743,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -767,7 +767,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -791,7 +791,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -815,7 +815,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -839,7 +839,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -863,7 +863,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -887,7 +887,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -911,7 +911,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -935,7 +935,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -959,7 +959,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -983,7 +983,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1007,7 +1007,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64: ; 
CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m1,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1031,7 +1031,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m1,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1055,7 +1055,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m2,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1079,7 +1079,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m2,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1103,7 +1103,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1127,7 +1127,7 @@ ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vfmsub.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv1f16( %0, @@ -68,7 +68,7 @@ define @intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv2f16( %0, @@ -108,7 +108,7 @@ define @intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv4f16( %0, @@ -148,7 +148,7 @@ define @intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv8f16( %0, @@ -188,7 +188,7 @@ define @intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) 
nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv16f16( %0, @@ -228,7 +228,7 @@ define @intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv32f16( %0, @@ -268,7 +268,7 @@ define @intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv1f32( %0, @@ -308,7 +308,7 @@ define @intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv2f32( %0, @@ -348,7 +348,7 @@ define @intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv4f32( %0, @@ -388,7 +388,7 @@ define @intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv8f32( %0, @@ -428,7 +428,7 @@ define @intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv16f32( %0, @@ -468,7 +468,7 @@ define @intrinsic_vfmul_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv1f16.f16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vfmul_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv2f16.f16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vfmul_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv4f16.f16( %0, @@ -588,7 +588,7 @@ define @intrinsic_vfmul_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv8f16.f16( %0, @@ -628,7 +628,7 @@ define @intrinsic_vfmul_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv16f16.f16( %0, @@ -668,7 +668,7 @@ define @intrinsic_vfmul_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv32f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv32f16.f16( %0, @@ -708,7 +708,7 @@ define @intrinsic_vfmul_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv1f32.f32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vfmul_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv2f32.f32( %0, @@ -788,7 +788,7 @@ define @intrinsic_vfmul_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv4f32.f32( %0, @@ -828,7 +828,7 @@ define @intrinsic_vfmul_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv8f32.f32( %0, @@ -868,7 +868,7 @@ define @intrinsic_vfmul_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv16f32.f32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv1f16( %0, @@ -68,7 +68,7 @@ define @intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv2f16( %0, @@ -108,7 +108,7 @@ define @intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv4f16( %0, @@ -148,7 +148,7 @@ define @intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv8f16( %0, @@ -188,7 +188,7 @@ define @intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv16f16( %0, @@ -228,7 +228,7 @@ define @intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv32f16( %0, @@ -268,7 +268,7 @@ define @intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv1f32( %0, @@ -308,7 +308,7 @@ define @intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv2f32( %0, @@ -348,7 +348,7 @@ define @intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv4f32( %0, @@ -388,7 +388,7 @@ define @intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e32,m4,tu,mu ; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv8f32( %0, @@ -428,7 +428,7 @@ define @intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv16f32( %0, @@ -468,7 +468,7 @@ define @intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv1f64( %0, @@ -508,7 +508,7 @@ define @intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv2f64( %0, @@ -548,7 +548,7 @@ define @intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv4f64( %0, @@ -588,7 +588,7 @@ define @intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv8f64( %0, @@ -628,7 +628,7 @@ define @intrinsic_vfmul_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv1f16.f16( %0, @@ -668,7 +668,7 @@ define @intrinsic_vfmul_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv2f16.f16( %0, @@ -708,7 +708,7 @@ define @intrinsic_vfmul_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv4f16.f16( %0, @@ -748,7 +748,7 @@ define @intrinsic_vfmul_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv8f16.f16( %0, @@ -788,7 
+788,7 @@ define @intrinsic_vfmul_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv16f16.f16( %0, @@ -828,7 +828,7 @@ define @intrinsic_vfmul_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv32f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv32f16.f16( %0, @@ -868,7 +868,7 @@ define @intrinsic_vfmul_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv1f32.f32( %0, @@ -908,7 +908,7 @@ define @intrinsic_vfmul_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv2f32.f32( %0, @@ -948,7 +948,7 @@ define @intrinsic_vfmul_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv4f32.f32( %0, @@ -988,7 +988,7 @@ define @intrinsic_vfmul_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv8f32.f32( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vfmul_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv16f32.f32( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vfmul_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv1f64.f64( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vfmul_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv2f64.f64( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vfmul_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; 
CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv4f64.f64( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vfmul_mask_vf_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfmul.mask.nxv8f64.f64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -33,7 +33,7 @@ define @intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -56,7 +56,7 @@ define @intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -79,7 +79,7 @@ define @intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -102,7 +102,7 @@ define @intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -125,7 +125,7 @@ define @intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -148,7 +148,7 @@ define @intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -171,7 +171,7 @@ define @intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -196,7 +196,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -221,7 +221,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -244,7 +244,7 @@ define @intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -267,7 +267,7 @@ define @intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -290,7 +290,7 @@ define @intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -313,7 +313,7 @@ define @intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -336,7 +336,7 @@ define @intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -359,7 +359,7 @@ define @intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -384,7 +384,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -409,7 +409,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; 
CHECK-NEXT: vfnmacc.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -433,7 +433,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -457,7 +457,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -481,7 +481,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -505,7 +505,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -529,7 +529,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -553,7 +553,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -577,7 +577,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -601,7 +601,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -625,7 +625,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -649,7 +649,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -673,7 +673,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -697,7 +697,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -721,7 +721,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -745,7 +745,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -769,7 +769,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -793,7 +793,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -817,7 +817,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -841,7 +841,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -33,7 +33,7 @@ define @intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -56,7 +56,7 @@ define @intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -79,7 +79,7 @@ define @intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -102,7 +102,7 @@ define @intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -125,7 +125,7 @@ define @intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -148,7 +148,7 @@ define @intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -171,7 +171,7 @@ define @intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -196,7 +196,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -221,7 +221,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -244,7 +244,7 @@ define @intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -267,7 +267,7 @@ define @intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -290,7 +290,7 @@ define @intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -313,7 +313,7 @@ define @intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu 
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -336,7 +336,7 @@ define @intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -359,7 +359,7 @@ define @intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -384,7 +384,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -409,7 +409,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -432,7 +432,7 @@ define @intrinsic_vfnmacc_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -455,7 +455,7 @@ define @intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -478,7 +478,7 @@ define @intrinsic_vfnmacc_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -501,7 +501,7 @@ define @intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -526,7 +526,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu ; CHECK-NEXT: vle64.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -551,7 +551,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu ; CHECK-NEXT: vle64.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vfnmacc.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -575,7 +575,7 @@ ; CHECK-LABEL: 
intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -599,7 +599,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -623,7 +623,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -647,7 +647,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -671,7 +671,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -695,7 +695,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -719,7 +719,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -743,7 +743,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -767,7 +767,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -791,7 +791,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -815,7 +815,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -839,7 +839,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; 
CHECK-NEXT: vfnmacc.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -863,7 +863,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -887,7 +887,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -911,7 +911,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -935,7 +935,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -959,7 +959,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -983,7 +983,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1007,7 +1007,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m1,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1031,7 +1031,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m1,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1055,7 +1055,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m2,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1079,7 +1079,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m2,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1103,7 +1103,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1127,7 +1127,7 @@ ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vfnmacc.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -33,7 +33,7 @@ define @intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -56,7 +56,7 @@ define @intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -79,7 +79,7 @@ define @intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -102,7 +102,7 @@ define @intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -125,7 +125,7 @@ define @intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -148,7 +148,7 @@ define @intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -171,7 +171,7 @@ define @intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -196,7 +196,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, 
e16,m4,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -221,7 +221,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -244,7 +244,7 @@ define @intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -267,7 +267,7 @@ define @intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -290,7 +290,7 @@ define @intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -313,7 +313,7 @@ define @intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -336,7 +336,7 @@ define @intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -359,7 +359,7 @@ define @intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -384,7 +384,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -409,7 +409,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -433,7 +433,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -457,7 +457,7 @@ ; CHECK-LABEL: 
intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -481,7 +481,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -505,7 +505,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -529,7 +529,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -553,7 +553,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -577,7 +577,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -601,7 +601,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -625,7 +625,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -649,7 +649,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -673,7 +673,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -697,7 +697,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -721,7 +721,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, 
e32,m1,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -745,7 +745,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -769,7 +769,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -793,7 +793,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -817,7 +817,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -841,7 +841,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -33,7 +33,7 @@ define @intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -56,7 +56,7 @@ define @intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -79,7 +79,7 @@ define @intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -102,7 +102,7 @@ define @intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli 
a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -125,7 +125,7 @@ define @intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -148,7 +148,7 @@ define @intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -171,7 +171,7 @@ define @intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -196,7 +196,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -221,7 +221,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -244,7 +244,7 @@ define @intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -267,7 +267,7 @@ define @intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -290,7 +290,7 @@ define @intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -313,7 +313,7 @@ define @intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -336,7 +336,7 @@ define @intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; 
CHECK-NEXT: vfnmadd.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -359,7 +359,7 @@ define @intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -384,7 +384,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -409,7 +409,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -432,7 +432,7 @@ define @intrinsic_vfnmadd_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -455,7 +455,7 @@ define @intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -478,7 +478,7 @@ define @intrinsic_vfnmadd_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -501,7 +501,7 @@ define @intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -526,7 +526,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu ; CHECK-NEXT: vle64.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -551,7 +551,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu ; CHECK-NEXT: vle64.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vfnmadd.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -575,7 +575,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -599,7 +599,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: 
vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -623,7 +623,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -647,7 +647,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -671,7 +671,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -695,7 +695,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -719,7 +719,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -743,7 +743,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -767,7 +767,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -791,7 +791,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -815,7 +815,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -839,7 +839,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -863,7 +863,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -887,7 +887,7 @@ ; CHECK-LABEL: 
intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -911,7 +911,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -935,7 +935,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -959,7 +959,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -983,7 +983,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1007,7 +1007,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m1,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1031,7 +1031,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m1,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1055,7 +1055,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m2,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1079,7 +1079,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m2,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1103,7 +1103,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1127,7 +1127,7 @@ ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vfnmadd.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll @@ -10,7 +10,7 @@ 
define @intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -33,7 +33,7 @@ define @intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -56,7 +56,7 @@ define @intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -79,7 +79,7 @@ define @intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -102,7 +102,7 @@ define @intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -125,7 +125,7 @@ define @intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -148,7 +148,7 @@ define @intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -171,7 +171,7 @@ define @intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -196,7 +196,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -221,7 +221,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -244,7 +244,7 @@ define 
@intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -267,7 +267,7 @@ define @intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -290,7 +290,7 @@ define @intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -313,7 +313,7 @@ define @intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -336,7 +336,7 @@ define @intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -359,7 +359,7 @@ define @intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -384,7 +384,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -409,7 +409,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -433,7 +433,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -457,7 +457,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -481,7 +481,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, 
e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -505,7 +505,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -529,7 +529,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -553,7 +553,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -577,7 +577,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -601,7 +601,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -625,7 +625,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -649,7 +649,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -673,7 +673,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -697,7 +697,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -721,7 +721,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -745,7 +745,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -769,7 +769,7 @@ ; CHECK-LABEL: 
intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -793,7 +793,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -817,7 +817,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -841,7 +841,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -33,7 +33,7 @@ define @intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -56,7 +56,7 @@ define @intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -79,7 +79,7 @@ define @intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -102,7 +102,7 @@ define @intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -125,7 +125,7 @@ define @intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v17, v18, 
v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -148,7 +148,7 @@ define @intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -171,7 +171,7 @@ define @intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -196,7 +196,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -221,7 +221,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -244,7 +244,7 @@ define @intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -267,7 +267,7 @@ define @intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -290,7 +290,7 @@ define @intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -313,7 +313,7 @@ define @intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -336,7 +336,7 @@ define @intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -359,7 +359,7 @@ define @intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v18, v20, v0.t ; CHECK-NEXT: 
jalr zero, 0(ra) entry: @@ -384,7 +384,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -409,7 +409,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -432,7 +432,7 @@ define @intrinsic_vfnmsac_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -455,7 +455,7 @@ define @intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -478,7 +478,7 @@ define @intrinsic_vfnmsac_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -501,7 +501,7 @@ define @intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -526,7 +526,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu ; CHECK-NEXT: vle64.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -551,7 +551,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu ; CHECK-NEXT: vle64.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vfnmsac.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -575,7 +575,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -599,7 +599,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -623,7 +623,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v17 ; CHECK-NEXT: 
jalr zero, 0(ra) entry: @@ -647,7 +647,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -671,7 +671,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -695,7 +695,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -719,7 +719,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -743,7 +743,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -767,7 +767,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -791,7 +791,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -815,7 +815,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -839,7 +839,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -863,7 +863,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -887,7 +887,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -911,7 +911,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, 
e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -935,7 +935,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -959,7 +959,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -983,7 +983,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1007,7 +1007,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m1,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1031,7 +1031,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m1,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1055,7 +1055,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m2,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1079,7 +1079,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m2,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1103,7 +1103,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1127,7 +1127,7 @@ ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vfnmsac.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -33,7 +33,7 @@ define @intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) 
nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -56,7 +56,7 @@ define @intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -79,7 +79,7 @@ define @intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -102,7 +102,7 @@ define @intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -125,7 +125,7 @@ define @intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -148,7 +148,7 @@ define @intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -171,7 +171,7 @@ define @intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -196,7 +196,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -221,7 +221,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -244,7 +244,7 @@ define @intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -267,7 +267,7 @@ define @intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -290,7 +290,7 @@ define @intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -313,7 +313,7 @@ define @intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -336,7 +336,7 @@ define @intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -359,7 +359,7 @@ define @intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -384,7 +384,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -409,7 +409,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -433,7 +433,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -457,7 +457,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -481,7 +481,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -505,7 +505,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ 
-529,7 +529,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -553,7 +553,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -577,7 +577,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -601,7 +601,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -625,7 +625,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -649,7 +649,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -673,7 +673,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -697,7 +697,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -721,7 +721,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -745,7 +745,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -769,7 +769,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -793,7 +793,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli 
a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -817,7 +817,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -841,7 +841,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -33,7 +33,7 @@ define @intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -56,7 +56,7 @@ define @intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -79,7 +79,7 @@ define @intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -102,7 +102,7 @@ define @intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -125,7 +125,7 @@ define @intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -148,7 +148,7 @@ define @intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -171,7 +171,7 @@ define 
@intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -196,7 +196,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -221,7 +221,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -244,7 +244,7 @@ define @intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -267,7 +267,7 @@ define @intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -290,7 +290,7 @@ define @intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -313,7 +313,7 @@ define @intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -336,7 +336,7 @@ define @intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -359,7 +359,7 @@ define @intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -384,7 +384,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -409,7 +409,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, 
(a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -432,7 +432,7 @@ define @intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -455,7 +455,7 @@ define @intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -478,7 +478,7 @@ define @intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v18, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -501,7 +501,7 @@ define @intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -526,7 +526,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu ; CHECK-NEXT: vle64.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v20, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -551,7 +551,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu ; CHECK-NEXT: vle64.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vfnmsub.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -575,7 +575,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -599,7 +599,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -623,7 +623,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -647,7 +647,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -671,7 +671,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # 
%entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -695,7 +695,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -719,7 +719,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -743,7 +743,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -767,7 +767,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -791,7 +791,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -815,7 +815,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -839,7 +839,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -863,7 +863,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -887,7 +887,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -911,7 +911,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -935,7 +935,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 
0(ra) entry: @@ -959,7 +959,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -983,7 +983,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1007,7 +1007,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m1,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1031,7 +1031,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m1,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1055,7 +1055,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m2,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1079,7 +1079,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m2,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1103,7 +1103,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1127,7 +1127,7 @@ ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vfnmsub.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vfrdiv_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrdiv.mask.nxv1f16.f16( %0, @@ -68,7 +68,7 @@ define @intrinsic_vfrdiv_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrdiv.mask.nxv2f16.f16( %0, @@ -108,7 +108,7 @@ define @intrinsic_vfrdiv_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; 
CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrdiv.mask.nxv4f16.f16( %0, @@ -148,7 +148,7 @@ define @intrinsic_vfrdiv_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrdiv.mask.nxv8f16.f16( %0, @@ -188,7 +188,7 @@ define @intrinsic_vfrdiv_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrdiv.mask.nxv16f16.f16( %0, @@ -228,7 +228,7 @@ define @intrinsic_vfrdiv_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv32f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrdiv.mask.nxv32f16.f16( %0, @@ -268,7 +268,7 @@ define @intrinsic_vfrdiv_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrdiv.mask.nxv1f32.f32( %0, @@ -308,7 +308,7 @@ define @intrinsic_vfrdiv_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrdiv.mask.nxv2f32.f32( %0, @@ -348,7 +348,7 @@ define @intrinsic_vfrdiv_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrdiv.mask.nxv4f32.f32( %0, @@ -388,7 +388,7 @@ define @intrinsic_vfrdiv_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrdiv.mask.nxv8f32.f32( %0, @@ -428,7 +428,7 @@ define @intrinsic_vfrdiv_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrdiv.mask.nxv16f32.f32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vfrdiv_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrdiv.mask.nxv1f16.f16( %0, @@ -68,7 +68,7 @@ define @intrinsic_vfrdiv_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrdiv.mask.nxv2f16.f16( %0, @@ -108,7 +108,7 @@ define @intrinsic_vfrdiv_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrdiv.mask.nxv4f16.f16( %0, @@ -148,7 +148,7 @@ define @intrinsic_vfrdiv_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrdiv.mask.nxv8f16.f16( %0, @@ -188,7 +188,7 @@ define @intrinsic_vfrdiv_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrdiv.mask.nxv16f16.f16( %0, @@ -228,7 +228,7 @@ define @intrinsic_vfrdiv_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv32f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrdiv.mask.nxv32f16.f16( %0, @@ -268,7 +268,7 @@ define @intrinsic_vfrdiv_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrdiv.mask.nxv1f32.f32( %0, @@ -308,7 +308,7 @@ define @intrinsic_vfrdiv_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrdiv.mask.nxv2f32.f32( %0, @@ -348,7 +348,7 @@ define @intrinsic_vfrdiv_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrdiv.mask.nxv4f32.f32( %0, @@ -388,7 +388,7 @@ define 
@intrinsic_vfrdiv_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrdiv.mask.nxv8f32.f32( %0, @@ -428,7 +428,7 @@ define @intrinsic_vfrdiv_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrdiv.mask.nxv16f32.f32( %0, @@ -468,7 +468,7 @@ define @intrinsic_vfrdiv_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrdiv.mask.nxv1f64.f64( %0, @@ -508,7 +508,7 @@ define @intrinsic_vfrdiv_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrdiv.mask.nxv2f64.f64( %0, @@ -548,7 +548,7 @@ define @intrinsic_vfrdiv_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrdiv.mask.nxv4f64.f64( %0, @@ -588,7 +588,7 @@ define @intrinsic_vfrdiv_mask_vf_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrdiv.mask.nxv8f64.f64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll @@ -29,7 +29,7 @@ define @intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrsub.mask.nxv1f16.f16( %0, @@ -69,7 +69,7 @@ define @intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrsub.mask.nxv2f16.f16( %0, @@ -109,7 +109,7 @@ define @intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrsub.mask.nxv4f16.f16( %0, @@ -149,7 +149,7 @@ define @intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrsub.mask.nxv8f16.f16( %0, @@ -189,7 +189,7 @@ define @intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrsub.mask.nxv16f16.f16( %0, @@ -229,7 +229,7 @@ define @intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrsub.mask.nxv32f16.f16( %0, @@ -269,7 +269,7 @@ define @intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrsub.mask.nxv1f32.f32( %0, @@ -309,7 +309,7 @@ define @intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrsub.mask.nxv2f32.f32( %0, @@ -349,7 +349,7 @@ define @intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrsub.mask.nxv4f32.f32( %0, @@ -389,7 +389,7 @@ define @intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrsub.mask.nxv8f32.f32( %0, @@ -429,7 +429,7 @@ define @intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrsub.mask.nxv16f32.f32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll @@ -29,7 +29,7 @@ define @intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrsub.mask.nxv1f16.f16( %0, @@ -69,7 +69,7 @@ define @intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrsub.mask.nxv2f16.f16( %0, @@ -109,7 +109,7 @@ define @intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrsub.mask.nxv4f16.f16( %0, @@ -149,7 +149,7 @@ define @intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrsub.mask.nxv8f16.f16( %0, @@ -189,7 +189,7 @@ define @intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrsub.mask.nxv16f16.f16( %0, @@ -229,7 +229,7 @@ define @intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrsub.mask.nxv32f16.f16( %0, @@ -269,7 +269,7 @@ define @intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrsub.mask.nxv1f32.f32( %0, @@ -309,7 +309,7 @@ define @intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrsub.mask.nxv2f32.f32( %0, @@ -349,7 +349,7 @@ define @intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: 
vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrsub.mask.nxv4f32.f32( %0, @@ -389,7 +389,7 @@ define @intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrsub.mask.nxv8f32.f32( %0, @@ -429,7 +429,7 @@ define @intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrsub.mask.nxv16f32.f32( %0, @@ -469,7 +469,7 @@ define @intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrsub.mask.nxv1f64.f64( %0, @@ -509,7 +509,7 @@ define @intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrsub.mask.nxv2f64.f64( %0, @@ -549,7 +549,7 @@ define @intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrsub.mask.nxv4f64.f64( %0, @@ -589,7 +589,7 @@ define @intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfrsub.mask.nxv8f64.f64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv1f16( %0, @@ -68,7 +68,7 @@ define @intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv2f16( %0, @@ -108,7 +108,7 @@ define @intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16( 
%0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv4f16( %0, @@ -148,7 +148,7 @@ define @intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv8f16( %0, @@ -188,7 +188,7 @@ define @intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv16f16( %0, @@ -228,7 +228,7 @@ define @intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv32f16( %0, @@ -268,7 +268,7 @@ define @intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv1f32( %0, @@ -308,7 +308,7 @@ define @intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv2f32( %0, @@ -348,7 +348,7 @@ define @intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv4f32( %0, @@ -388,7 +388,7 @@ define @intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv8f32( %0, @@ -428,7 +428,7 @@ define @intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv16f32( %0, @@ -468,7 +468,7 @@ define @intrinsic_vfsgnj_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f16_f16 -; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv1f16.f16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vfsgnj_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv2f16.f16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vfsgnj_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv4f16.f16( %0, @@ -588,7 +588,7 @@ define @intrinsic_vfsgnj_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv8f16.f16( %0, @@ -628,7 +628,7 @@ define @intrinsic_vfsgnj_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv16f16.f16( %0, @@ -668,7 +668,7 @@ define @intrinsic_vfsgnj_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv32f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv32f16.f16( %0, @@ -708,7 +708,7 @@ define @intrinsic_vfsgnj_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv1f32.f32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vfsgnj_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv2f32.f32( %0, @@ -788,7 +788,7 @@ define @intrinsic_vfsgnj_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv4f32.f32( %0, @@ -828,7 +828,7 @@ define @intrinsic_vfsgnj_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv8f32.f32( %0, @@ -868,7 +868,7 @@ define @intrinsic_vfsgnj_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv16f32.f32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv1f16( %0, @@ -68,7 +68,7 @@ define @intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv2f16( %0, @@ -108,7 +108,7 @@ define @intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv4f16( %0, @@ -148,7 +148,7 @@ define @intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv8f16( %0, @@ -188,7 +188,7 @@ define @intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv16f16( %0, @@ -228,7 +228,7 @@ define @intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv32f16( %0, @@ -268,7 +268,7 @@ define @intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv1f32( %0, @@ -308,7 +308,7 @@ define @intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv2f32( %0, @@ -348,7 +348,7 @@ define @intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv4f32( %0, @@ -388,7 +388,7 @@ define @intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv8f32( %0, @@ -428,7 +428,7 @@ define @intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv16f32( %0, @@ -468,7 +468,7 @@ define @intrinsic_vfsgnj_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f64_nxv1f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv1f64( %0, @@ -508,7 +508,7 @@ define @intrinsic_vfsgnj_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f64_nxv2f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv2f64( %0, @@ -548,7 +548,7 @@ define @intrinsic_vfsgnj_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f64_nxv4f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv4f64( %0, @@ -588,7 +588,7 @@ define @intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv8f64( %0, @@ -628,7 +628,7 @@ define @intrinsic_vfsgnj_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv1f16.f16( %0, @@ -668,7 +668,7 @@ define @intrinsic_vfsgnj_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv2f16.f16( %0, @@ -708,7 +708,7 @@ define @intrinsic_vfsgnj_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv4f16.f16( %0, @@ -748,7 +748,7 @@ define @intrinsic_vfsgnj_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv8f16.f16( %0, @@ -788,7 +788,7 @@ define @intrinsic_vfsgnj_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv16f16.f16( %0, @@ -828,7 +828,7 @@ define @intrinsic_vfsgnj_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv32f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv32f16.f16( %0, @@ -868,7 +868,7 @@ define @intrinsic_vfsgnj_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv1f32.f32( %0, @@ -908,7 +908,7 @@ define @intrinsic_vfsgnj_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv2f32.f32( %0, @@ -948,7 +948,7 @@ define @intrinsic_vfsgnj_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv4f32.f32( %0, @@ -988,7 +988,7 @@ define @intrinsic_vfsgnj_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv8f32.f32( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vfsgnj_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfsgnj.vf {{v[0-9]+}}, 
{{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv16f32.f32( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vfsgnj_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv1f64.f64( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vfsgnj_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv2f64.f64( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vfsgnj_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv4f64.f64( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vfsgnj_mask_vf_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnj.mask.nxv8f64.f64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv1f16( %0, @@ -68,7 +68,7 @@ define @intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv2f16( %0, @@ -108,7 +108,7 @@ define @intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv4f16( %0, @@ -148,7 +148,7 @@ define @intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv8f16( %0, @@ -188,7 +188,7 @@ define @intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv16f16( %0, @@ -228,7 +228,7 @@ define @intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv32f16( %0, @@ -268,7 +268,7 @@ define @intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv1f32( %0, @@ -308,7 +308,7 @@ define @intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv2f32( %0, @@ -348,7 +348,7 @@ define @intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv4f32( %0, @@ -388,7 +388,7 @@ define @intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv8f32( %0, @@ -428,7 +428,7 @@ define @intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv16f32( %0, @@ -468,7 +468,7 @@ define @intrinsic_vfsgnjn_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv1f16.f16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vfsgnjn_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv2f16.f16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vfsgnjn_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f16_f16 -; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv4f16.f16( %0, @@ -588,7 +588,7 @@ define @intrinsic_vfsgnjn_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv8f16.f16( %0, @@ -628,7 +628,7 @@ define @intrinsic_vfsgnjn_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv16f16.f16( %0, @@ -668,7 +668,7 @@ define @intrinsic_vfsgnjn_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv32f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv32f16.f16( %0, @@ -708,7 +708,7 @@ define @intrinsic_vfsgnjn_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv1f32.f32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vfsgnjn_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv2f32.f32( %0, @@ -788,7 +788,7 @@ define @intrinsic_vfsgnjn_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv4f32.f32( %0, @@ -828,7 +828,7 @@ define @intrinsic_vfsgnjn_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv8f32.f32( %0, @@ -868,7 +868,7 @@ define @intrinsic_vfsgnjn_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv16f32.f32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll @@ -28,7 +28,7 @@ define 
@intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv1f16( %0, @@ -68,7 +68,7 @@ define @intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv2f16( %0, @@ -108,7 +108,7 @@ define @intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv4f16( %0, @@ -148,7 +148,7 @@ define @intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv8f16( %0, @@ -188,7 +188,7 @@ define @intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv16f16( %0, @@ -228,7 +228,7 @@ define @intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv32f16( %0, @@ -268,7 +268,7 @@ define @intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv1f32( %0, @@ -308,7 +308,7 @@ define @intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv2f32( %0, @@ -348,7 +348,7 @@ define @intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv4f32( %0, @@ -388,7 +388,7 @@ define @intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 
%4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv8f32( %0, @@ -428,7 +428,7 @@ define @intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv16f32( %0, @@ -468,7 +468,7 @@ define @intrinsic_vfsgnjn_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f64_nxv1f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv1f64( %0, @@ -508,7 +508,7 @@ define @intrinsic_vfsgnjn_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f64_nxv2f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv2f64( %0, @@ -548,7 +548,7 @@ define @intrinsic_vfsgnjn_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f64_nxv4f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv4f64( %0, @@ -588,7 +588,7 @@ define @intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv8f64( %0, @@ -628,7 +628,7 @@ define @intrinsic_vfsgnjn_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv1f16.f16( %0, @@ -668,7 +668,7 @@ define @intrinsic_vfsgnjn_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv2f16.f16( %0, @@ -708,7 +708,7 @@ define @intrinsic_vfsgnjn_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv4f16.f16( %0, @@ -748,7 +748,7 @@ define @intrinsic_vfsgnjn_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vfsgnjn_mask_vf_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv8f16.f16( %0, @@ -788,7 +788,7 @@ define @intrinsic_vfsgnjn_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv16f16.f16( %0, @@ -828,7 +828,7 @@ define @intrinsic_vfsgnjn_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv32f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv32f16.f16( %0, @@ -868,7 +868,7 @@ define @intrinsic_vfsgnjn_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv1f32.f32( %0, @@ -908,7 +908,7 @@ define @intrinsic_vfsgnjn_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv2f32.f32( %0, @@ -948,7 +948,7 @@ define @intrinsic_vfsgnjn_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv4f32.f32( %0, @@ -988,7 +988,7 @@ define @intrinsic_vfsgnjn_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv8f32.f32( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vfsgnjn_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv16f32.f32( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vfsgnjn_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv1f64.f64( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vfsgnjn_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vfsgnjn_mask_vf_nxv2f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv2f64.f64( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vfsgnjn_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv4f64.f64( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vfsgnjn_mask_vf_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjn.mask.nxv8f64.f64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv1f16( %0, @@ -68,7 +68,7 @@ define @intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv2f16( %0, @@ -108,7 +108,7 @@ define @intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv4f16( %0, @@ -148,7 +148,7 @@ define @intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv8f16( %0, @@ -188,7 +188,7 @@ define @intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv16f16( %0, @@ -228,7 +228,7 @@ define @intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call 
@llvm.riscv.vfsgnjx.mask.nxv32f16( %0, @@ -268,7 +268,7 @@ define @intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv1f32( %0, @@ -308,7 +308,7 @@ define @intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv2f32( %0, @@ -348,7 +348,7 @@ define @intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv4f32( %0, @@ -388,7 +388,7 @@ define @intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv8f32( %0, @@ -428,7 +428,7 @@ define @intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv16f32( %0, @@ -468,7 +468,7 @@ define @intrinsic_vfsgnjx_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv1f16.f16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vfsgnjx_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv2f16.f16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vfsgnjx_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv4f16.f16( %0, @@ -588,7 +588,7 @@ define @intrinsic_vfsgnjx_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv8f16.f16( %0, @@ -628,7 +628,7 @@ 
define @intrinsic_vfsgnjx_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv16f16.f16( %0, @@ -668,7 +668,7 @@ define @intrinsic_vfsgnjx_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv32f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv32f16.f16( %0, @@ -708,7 +708,7 @@ define @intrinsic_vfsgnjx_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv1f32.f32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vfsgnjx_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv2f32.f32( %0, @@ -788,7 +788,7 @@ define @intrinsic_vfsgnjx_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv4f32.f32( %0, @@ -828,7 +828,7 @@ define @intrinsic_vfsgnjx_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv8f32.f32( %0, @@ -868,7 +868,7 @@ define @intrinsic_vfsgnjx_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv16f32.f32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv1f16( %0, @@ -68,7 +68,7 @@ define @intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv2f16( %0, @@ -108,7 +108,7 @@ define @intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv4f16( %0, @@ -148,7 +148,7 @@ define @intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv8f16( %0, @@ -188,7 +188,7 @@ define @intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv16f16( %0, @@ -228,7 +228,7 @@ define @intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv32f16( %0, @@ -268,7 +268,7 @@ define @intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv1f32( %0, @@ -308,7 +308,7 @@ define @intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv2f32( %0, @@ -348,7 +348,7 @@ define @intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv4f32( %0, @@ -388,7 +388,7 @@ define @intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv8f32( %0, @@ -428,7 +428,7 @@ define @intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfsgnjx.vv {{v[0-9]+}}, 
{{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv16f32( %0, @@ -468,7 +468,7 @@ define @intrinsic_vfsgnjx_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f64_nxv1f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv1f64( %0, @@ -508,7 +508,7 @@ define @intrinsic_vfsgnjx_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f64_nxv2f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv2f64( %0, @@ -548,7 +548,7 @@ define @intrinsic_vfsgnjx_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f64_nxv4f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv4f64( %0, @@ -588,7 +588,7 @@ define @intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv8f64( %0, @@ -628,7 +628,7 @@ define @intrinsic_vfsgnjx_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv1f16.f16( %0, @@ -668,7 +668,7 @@ define @intrinsic_vfsgnjx_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv2f16.f16( %0, @@ -708,7 +708,7 @@ define @intrinsic_vfsgnjx_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv4f16.f16( %0, @@ -748,7 +748,7 @@ define @intrinsic_vfsgnjx_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv8f16.f16( %0, @@ -788,7 +788,7 @@ define @intrinsic_vfsgnjx_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call 
@llvm.riscv.vfsgnjx.mask.nxv16f16.f16( %0, @@ -828,7 +828,7 @@ define @intrinsic_vfsgnjx_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv32f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv32f16.f16( %0, @@ -868,7 +868,7 @@ define @intrinsic_vfsgnjx_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv1f32.f32( %0, @@ -908,7 +908,7 @@ define @intrinsic_vfsgnjx_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv2f32.f32( %0, @@ -948,7 +948,7 @@ define @intrinsic_vfsgnjx_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv4f32.f32( %0, @@ -988,7 +988,7 @@ define @intrinsic_vfsgnjx_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv8f32.f32( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vfsgnjx_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv16f32.f32( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vfsgnjx_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv1f64.f64( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vfsgnjx_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv2f64.f64( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vfsgnjx_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call 
@llvm.riscv.vfsgnjx.mask.nxv4f64.f64( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vfsgnjx_mask_vf_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsgnjx.mask.nxv8f64.f64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll @@ -33,7 +33,7 @@ ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfslide1down.vf v16, v17, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -79,7 +79,7 @@ ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfslide1down.vf v16, v17, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -125,7 +125,7 @@ ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfslide1down.vf v16, v17, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -171,7 +171,7 @@ ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfslide1down.vf v16, v18, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -217,7 +217,7 @@ ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfslide1down.vf v16, v20, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -265,7 +265,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: fmv.h.x ft0, a1 -; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m8,tu,mu ; CHECK-NEXT: vfslide1down.vf v16, v8, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -311,7 +311,7 @@ ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfslide1down.vf v16, v17, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -357,7 +357,7 @@ ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfslide1down.vf v16, v17, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -403,7 +403,7 @@ ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfslide1down.vf v16, v18, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -449,7 +449,7 @@ ; 
CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfslide1down.vf v16, v20, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -497,7 +497,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: fmv.w.x ft0, a1 -; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e32,m8,tu,mu ; CHECK-NEXT: vfslide1down.vf v16, v8, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll @@ -33,7 +33,7 @@ ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfslide1down.vf v16, v17, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -79,7 +79,7 @@ ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfslide1down.vf v16, v17, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -125,7 +125,7 @@ ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfslide1down.vf v16, v17, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -171,7 +171,7 @@ ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfslide1down.vf v16, v18, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -217,7 +217,7 @@ ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfslide1down.vf v16, v20, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -265,7 +265,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: fmv.h.x ft0, a1 -; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m8,tu,mu ; CHECK-NEXT: vfslide1down.vf v16, v8, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -311,7 +311,7 @@ ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfslide1down.vf v16, v17, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -357,7 +357,7 @@ ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfslide1down.vf v16, v17, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -403,7 +403,7 @@ ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu 
+; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfslide1down.vf v16, v18, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -449,7 +449,7 @@ ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfslide1down.vf v16, v20, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -497,7 +497,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: fmv.w.x ft0, a1 -; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e32,m8,tu,mu ; CHECK-NEXT: vfslide1down.vf v16, v8, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -543,7 +543,7 @@ ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m1,tu,mu ; CHECK-NEXT: vfslide1down.vf v16, v17, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -589,7 +589,7 @@ ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m2,tu,mu ; CHECK-NEXT: vfslide1down.vf v16, v18, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -635,7 +635,7 @@ ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vfslide1down.vf v16, v20, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -683,7 +683,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: fmv.d.x ft0, a1 -; CHECK-NEXT: vsetvli a0, a2, e64,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e64,m8,tu,mu ; CHECK-NEXT: vfslide1down.vf v16, v8, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll @@ -34,7 +34,7 @@ ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfslide1up.vf v16, v17, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -81,7 +81,7 @@ ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfslide1up.vf v16, v17, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -128,7 +128,7 @@ ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfslide1up.vf v16, v17, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -175,7 +175,7 @@ ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfslide1up.vf v16, v18, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -222,7 +222,7 @@ ; CHECK-LABEL: 
intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfslide1up.vf v16, v20, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -271,7 +271,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: fmv.h.x ft0, a1 -; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m8,tu,mu ; CHECK-NEXT: vfslide1up.vf v16, v8, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -318,7 +318,7 @@ ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfslide1up.vf v16, v17, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -365,7 +365,7 @@ ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfslide1up.vf v16, v17, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -412,7 +412,7 @@ ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfslide1up.vf v16, v18, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -459,7 +459,7 @@ ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfslide1up.vf v16, v20, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -508,7 +508,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: fmv.w.x ft0, a1 -; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e32,m8,tu,mu ; CHECK-NEXT: vfslide1up.vf v16, v8, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll @@ -34,7 +34,7 @@ ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfslide1up.vf v16, v17, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -81,7 +81,7 @@ ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfslide1up.vf v16, v17, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -128,7 +128,7 @@ ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfslide1up.vf v16, v17, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -175,7 +175,7 @@ ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: 
vfslide1up.vf v16, v18, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -222,7 +222,7 @@ ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfslide1up.vf v16, v20, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -271,7 +271,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: fmv.h.x ft0, a1 -; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m8,tu,mu ; CHECK-NEXT: vfslide1up.vf v16, v8, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -318,7 +318,7 @@ ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfslide1up.vf v16, v17, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -365,7 +365,7 @@ ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfslide1up.vf v16, v17, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -412,7 +412,7 @@ ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfslide1up.vf v16, v18, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -459,7 +459,7 @@ ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfslide1up.vf v16, v20, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -508,7 +508,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: fmv.w.x ft0, a1 -; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e32,m8,tu,mu ; CHECK-NEXT: vfslide1up.vf v16, v8, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -555,7 +555,7 @@ ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m1,tu,mu ; CHECK-NEXT: vfslide1up.vf v16, v17, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -602,7 +602,7 @@ ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m2,tu,mu ; CHECK-NEXT: vfslide1up.vf v16, v18, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -649,7 +649,7 @@ ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vfslide1up.vf v16, v20, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -698,7 +698,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: fmv.d.x ft0, a1 -; CHECK-NEXT: vsetvli a0, a2, e64,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e64,m8,tu,mu ; CHECK-NEXT: vfslide1up.vf v16, v8, ft0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll @@ -29,7 +29,7 @@ define @intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16( %0, @@ -69,7 +69,7 @@ define @intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16( %0, @@ -109,7 +109,7 @@ define @intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16( %0, @@ -149,7 +149,7 @@ define @intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16( %0, @@ -189,7 +189,7 @@ define @intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16( %0, @@ -229,7 +229,7 @@ define @intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16( %0, @@ -269,7 +269,7 @@ define @intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32( %0, @@ -309,7 +309,7 @@ define @intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32( %0, @@ -349,7 +349,7 @@ define @intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32( %0, @@ -389,7 +389,7 @@ define @intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32( %0, @@ -429,7 +429,7 @@ define @intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32( %0, @@ -469,7 +469,7 @@ define @intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv1f16.f16( %0, @@ -509,7 +509,7 @@ define @intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv2f16.f16( %0, @@ -549,7 +549,7 @@ define @intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv4f16.f16( %0, @@ -589,7 +589,7 @@ define @intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv8f16.f16( %0, @@ -629,7 +629,7 @@ define @intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv16f16.f16( %0, @@ -669,7 +669,7 @@ define @intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv32f16.f16( %0, @@ -709,7 +709,7 @@ define @intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, 
float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv1f32.f32( %0, @@ -749,7 +749,7 @@ define @intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv2f32.f32( %0, @@ -789,7 +789,7 @@ define @intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv4f32.f32( %0, @@ -829,7 +829,7 @@ define @intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv8f32.f32( %0, @@ -869,7 +869,7 @@ define @intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv16f32.f32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll @@ -29,7 +29,7 @@ define @intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16( %0, @@ -69,7 +69,7 @@ define @intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16( %0, @@ -109,7 +109,7 @@ define @intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16( %0, @@ -149,7 +149,7 @@ define @intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16( %0, @@ -189,7 +189,7 @@ define @intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16( %0, @@ -229,7 +229,7 @@ define @intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16( %0, @@ -269,7 +269,7 @@ define @intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32( %0, @@ -309,7 +309,7 @@ define @intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32( %0, @@ -349,7 +349,7 @@ define @intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32( %0, @@ -389,7 +389,7 @@ define @intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32( %0, @@ -429,7 +429,7 @@ define @intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32( %0, @@ -469,7 +469,7 @@ define @intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64( %0, @@ -509,7 +509,7 @@ define @intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64( %0, @@ -549,7 +549,7 @@ define @intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64( %0, @@ -589,7 +589,7 @@ define @intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64( %0, @@ -629,7 +629,7 @@ define @intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv1f16.f16( %0, @@ -669,7 +669,7 @@ define @intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv2f16.f16( %0, @@ -709,7 +709,7 @@ define @intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv4f16.f16( %0, @@ -749,7 +749,7 @@ define @intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv8f16.f16( %0, @@ -789,7 +789,7 @@ define @intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv16f16.f16( %0, @@ -829,7 +829,7 @@ define @intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv32f16.f16( %0, @@ -869,7 +869,7 @@ define @intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, 
%3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv1f32.f32( %0, @@ -909,7 +909,7 @@ define @intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv2f32.f32( %0, @@ -949,7 +949,7 @@ define @intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv4f32.f32( %0, @@ -989,7 +989,7 @@ define @intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv8f32.f32( %0, @@ -1029,7 +1029,7 @@ define @intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv16f32.f32( %0, @@ -1069,7 +1069,7 @@ define @intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv1f64.f64( %0, @@ -1109,7 +1109,7 @@ define @intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv2f64.f64( %0, @@ -1149,7 +1149,7 @@ define @intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv4f64.f64( %0, @@ -1189,7 +1189,7 @@ define @intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfsub.mask.nxv8f64.f64( %0, diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vfwadd_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.mask.nxv1f16( %0, @@ -68,7 +68,7 @@ define @intrinsic_vfwadd_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.mask.nxv2f16( %0, @@ -108,7 +108,7 @@ define @intrinsic_vfwadd_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.mask.nxv4f16( %0, @@ -148,7 +148,7 @@ define @intrinsic_vfwadd_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.mask.nxv8f16( %0, @@ -188,7 +188,7 @@ define @intrinsic_vfwadd_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.mask.nxv16f16( %0, @@ -228,7 +228,7 @@ define @intrinsic_vfwadd_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.mask.nxv1f16.f16( %0, @@ -268,7 +268,7 @@ define @intrinsic_vfwadd_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.mask.nxv2f16.f16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vfwadd_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.mask.nxv4f16.f16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vfwadd_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwadd.vf 
{{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.mask.nxv8f16.f16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vfwadd_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.mask.nxv16f16.f16( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vfwadd_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.mask.nxv1f16( %0, @@ -68,7 +68,7 @@ define @intrinsic_vfwadd_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.mask.nxv2f16( %0, @@ -108,7 +108,7 @@ define @intrinsic_vfwadd_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.mask.nxv4f16( %0, @@ -148,7 +148,7 @@ define @intrinsic_vfwadd_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.mask.nxv8f16( %0, @@ -188,7 +188,7 @@ define @intrinsic_vfwadd_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.mask.nxv16f16( %0, @@ -228,7 +228,7 @@ define @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.mask.nxv1f32( %0, @@ -268,7 +268,7 @@ define @intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.mask.nxv2f32( %0, @@ -308,7 +308,7 @@ define @intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f32 -; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.mask.nxv4f32( %0, @@ -348,7 +348,7 @@ define @intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.mask.nxv8f32( %0, @@ -388,7 +388,7 @@ define @intrinsic_vfwadd_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.mask.nxv1f16.f16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vfwadd_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.mask.nxv2f16.f16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vfwadd_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.mask.nxv4f16.f16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vfwadd_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.mask.nxv8f16.f16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vfwadd_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.mask.nxv16f16.f16( %0, @@ -588,7 +588,7 @@ define @intrinsic_vfwadd_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.mask.nxv1f32.f32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vfwadd_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.mask.nxv2f32.f32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vfwadd_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; 
CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.mask.nxv4f32.f32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vfwadd_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.mask.nxv8f32.f32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv1f16( %0, @@ -68,7 +68,7 @@ define @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv2f16( %0, @@ -108,7 +108,7 @@ define @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv4f16( %0, @@ -148,7 +148,7 @@ define @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv8f16( %0, @@ -188,7 +188,7 @@ define @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv16f16( %0, @@ -228,7 +228,7 @@ define @intrinsic_vfwadd.w_mask_wf_nxv1f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv1f32.f16( %0, @@ -268,7 +268,7 @@ define @intrinsic_vfwadd.w_mask_wf_nxv2f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv2f32.f16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vfwadd.w_mask_wf_nxv4f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; 
CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv4f32.f16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vfwadd.w_mask_wf_nxv8f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv8f32.f16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vfwadd.w_mask_wf_nxv16f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv16f32.f16( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv1f16( %0, @@ -68,7 +68,7 @@ define @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv2f16( %0, @@ -108,7 +108,7 @@ define @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv4f16( %0, @@ -148,7 +148,7 @@ define @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv8f16( %0, @@ -188,7 +188,7 @@ define @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv16f16( %0, @@ -228,7 +228,7 @@ define @intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = 
call @llvm.riscv.vfwadd.w.mask.nxv1f32( %0, @@ -268,7 +268,7 @@ define @intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv2f32( %0, @@ -308,7 +308,7 @@ define @intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv4f32( %0, @@ -348,7 +348,7 @@ define @intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv8f32( %0, @@ -388,7 +388,7 @@ define @intrinsic_vfwadd.w_mask_wf_nxv1f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv1f32.f16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vfwadd.w_mask_wf_nxv2f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv2f32.f16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vfwadd.w_mask_wf_nxv4f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv4f32.f16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vfwadd.w_mask_wf_nxv8f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv8f32.f16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vfwadd.w_mask_wf_nxv16f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv16f32.f16( %0, @@ -588,7 +588,7 @@ define @intrinsic_vfwadd.w_mask_wf_nxv1f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f64_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32( 
%0, @@ -628,7 +628,7 @@ define @intrinsic_vfwadd.w_mask_wf_nxv2f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f64_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv2f64.f32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vfwadd.w_mask_wf_nxv4f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f64_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv4f64.f32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vfwadd.w_mask_wf_nxv8f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f64_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwadd.w.mask.nxv8f64.f32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfwmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -33,7 +33,7 @@ define @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfwmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -56,7 +56,7 @@ define @intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfwmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -79,7 +79,7 @@ define @intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfwmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -102,7 +102,7 @@ define @intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfwmacc.vv v16, v18, v19 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -125,7 +125,7 @@ define @intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfwmacc.vv v16, v18, v19, v0.t ; 
CHECK-NEXT: jalr zero, 0(ra) entry: @@ -148,7 +148,7 @@ define @intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfwmacc.vv v16, v20, v22 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -171,7 +171,7 @@ define @intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfwmacc.vv v16, v20, v22, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -197,7 +197,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vfwmacc.vv v16, v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -223,7 +223,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vfwmacc.vv v16, v8, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -247,7 +247,7 @@ ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfwmacc.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -271,7 +271,7 @@ ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfwmacc.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -295,7 +295,7 @@ ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfwmacc.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -319,7 +319,7 @@ ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfwmacc.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -343,7 +343,7 @@ ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfwmacc.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -367,7 +367,7 @@ ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfwmacc.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -391,7 +391,7 @@ ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfwmacc.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -415,7 +415,7 @@ ; CHECK-LABEL: 
intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfwmacc.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -441,7 +441,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vfwmacc.vf v16, ft0, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -467,7 +467,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vfwmacc.vf v16, ft0, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfwmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -33,7 +33,7 @@ define @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfwmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -56,7 +56,7 @@ define @intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfwmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -79,7 +79,7 @@ define @intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfwmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -102,7 +102,7 @@ define @intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfwmacc.vv v16, v18, v19 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -125,7 +125,7 @@ define @intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfwmacc.vv v16, v18, v19, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -148,7 +148,7 @@ define @intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, 
e16,m2,tu,mu ; CHECK-NEXT: vfwmacc.vv v16, v20, v22 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -171,7 +171,7 @@ define @intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfwmacc.vv v16, v20, v22, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -197,7 +197,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vfwmacc.vv v16, v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -223,7 +223,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vfwmacc.vv v16, v8, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -246,7 +246,7 @@ define @intrinsic_vfwmacc_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfwmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -269,7 +269,7 @@ define @intrinsic_vfwmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfwmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -292,7 +292,7 @@ define @intrinsic_vfwmacc_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfwmacc.vv v16, v18, v19 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -315,7 +315,7 @@ define @intrinsic_vfwmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfwmacc.vv v16, v18, v19, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -338,7 +338,7 @@ define @intrinsic_vfwmacc_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfwmacc.vv v16, v20, v22 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -361,7 +361,7 @@ define @intrinsic_vfwmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfwmacc.vv v16, v20, v22, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -387,7 +387,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a1) ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e32,m4,tu,mu ; CHECK-NEXT: vfwmacc.vv v16, v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) 
entry: @@ -413,7 +413,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a1) ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e32,m4,tu,mu ; CHECK-NEXT: vfwmacc.vv v16, v8, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -437,7 +437,7 @@ ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfwmacc.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -461,7 +461,7 @@ ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfwmacc.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -485,7 +485,7 @@ ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfwmacc.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -509,7 +509,7 @@ ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfwmacc.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -533,7 +533,7 @@ ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfwmacc.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -557,7 +557,7 @@ ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfwmacc.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -581,7 +581,7 @@ ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfwmacc.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -605,7 +605,7 @@ ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfwmacc.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -631,7 +631,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vfwmacc.vf v16, ft0, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -657,7 +657,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vfwmacc.vf v16, ft0, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -681,7 +681,7 @@ ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, 
e32,mf2,tu,mu ; CHECK-NEXT: vfwmacc.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -705,7 +705,7 @@ ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfwmacc.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -729,7 +729,7 @@ ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfwmacc.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -753,7 +753,7 @@ ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfwmacc.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -777,7 +777,7 @@ ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfwmacc.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -801,7 +801,7 @@ ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfwmacc.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -827,7 +827,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a1) ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e32,m4,tu,mu ; CHECK-NEXT: vfwmacc.vf v16, ft0, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -853,7 +853,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a1) ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e32,m4,tu,mu ; CHECK-NEXT: vfwmacc.vf v16, ft0, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfwmsac.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -33,7 +33,7 @@ define @intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfwmsac.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -56,7 +56,7 @@ define @intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfwmsac.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -79,7 +79,7 @@ define 
@intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfwmsac.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -102,7 +102,7 @@ define @intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfwmsac.vv v16, v18, v19 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -125,7 +125,7 @@ define @intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfwmsac.vv v16, v18, v19, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -148,7 +148,7 @@ define @intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfwmsac.vv v16, v20, v22 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -171,7 +171,7 @@ define @intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfwmsac.vv v16, v20, v22, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -197,7 +197,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vfwmsac.vv v16, v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -223,7 +223,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vfwmsac.vv v16, v8, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -247,7 +247,7 @@ ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfwmsac.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -271,7 +271,7 @@ ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfwmsac.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -295,7 +295,7 @@ ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfwmsac.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -319,7 +319,7 @@ ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, 
e16,mf2,tu,mu ; CHECK-NEXT: vfwmsac.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -343,7 +343,7 @@ ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfwmsac.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -367,7 +367,7 @@ ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfwmsac.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -391,7 +391,7 @@ ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfwmsac.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -415,7 +415,7 @@ ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfwmsac.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -441,7 +441,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vfwmsac.vf v16, ft0, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -467,7 +467,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vfwmsac.vf v16, ft0, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfwmsac.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -33,7 +33,7 @@ define @intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfwmsac.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -56,7 +56,7 @@ define @intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfwmsac.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -79,7 +79,7 @@ define @intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfwmsac.vv v16, v17, v18, v0.t ; 
CHECK-NEXT: jalr zero, 0(ra) entry: @@ -102,7 +102,7 @@ define @intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfwmsac.vv v16, v18, v19 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -125,7 +125,7 @@ define @intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfwmsac.vv v16, v18, v19, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -148,7 +148,7 @@ define @intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfwmsac.vv v16, v20, v22 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -171,7 +171,7 @@ define @intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfwmsac.vv v16, v20, v22, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -197,7 +197,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vfwmsac.vv v16, v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -223,7 +223,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vfwmsac.vv v16, v8, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -246,7 +246,7 @@ define @intrinsic_vfwmsac_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfwmsac.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -269,7 +269,7 @@ define @intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfwmsac.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -292,7 +292,7 @@ define @intrinsic_vfwmsac_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfwmsac.vv v16, v18, v19 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -315,7 +315,7 @@ define @intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfwmsac.vv v16, v18, v19, v0.t ; CHECK-NEXT: jalr 
zero, 0(ra) entry: @@ -338,7 +338,7 @@ define @intrinsic_vfwmsac_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfwmsac.vv v16, v20, v22 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -361,7 +361,7 @@ define @intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfwmsac.vv v16, v20, v22, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -387,7 +387,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a1) ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e32,m4,tu,mu ; CHECK-NEXT: vfwmsac.vv v16, v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -413,7 +413,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a1) ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e32,m4,tu,mu ; CHECK-NEXT: vfwmsac.vv v16, v8, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -437,7 +437,7 @@ ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfwmsac.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -461,7 +461,7 @@ ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfwmsac.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -485,7 +485,7 @@ ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfwmsac.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -509,7 +509,7 @@ ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfwmsac.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -533,7 +533,7 @@ ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfwmsac.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -557,7 +557,7 @@ ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfwmsac.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -581,7 +581,7 @@ ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfwmsac.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -605,7 +605,7 @@ ; CHECK-LABEL: 
intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfwmsac.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -631,7 +631,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vfwmsac.vf v16, ft0, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -657,7 +657,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vfwmsac.vf v16, ft0, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -681,7 +681,7 @@ ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfwmsac.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -705,7 +705,7 @@ ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfwmsac.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -729,7 +729,7 @@ ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfwmsac.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -753,7 +753,7 @@ ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfwmsac.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -777,7 +777,7 @@ ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfwmsac.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -801,7 +801,7 @@ ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfwmsac.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -827,7 +827,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a1) ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e32,m4,tu,mu ; CHECK-NEXT: vfwmsac.vf v16, ft0, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -853,7 +853,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a1) ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e32,m4,tu,mu ; CHECK-NEXT: vfwmsac.vf v16, ft0, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vfwmul_mask_vv_nxv1f16_nxv1f16( 
%0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwmul.mask.nxv1f16( %0, @@ -68,7 +68,7 @@ define @intrinsic_vfwmul_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwmul.mask.nxv2f16( %0, @@ -108,7 +108,7 @@ define @intrinsic_vfwmul_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwmul.mask.nxv4f16( %0, @@ -148,7 +148,7 @@ define @intrinsic_vfwmul_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwmul.mask.nxv8f16( %0, @@ -188,7 +188,7 @@ define @intrinsic_vfwmul_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwmul.mask.nxv16f16( %0, @@ -228,7 +228,7 @@ define @intrinsic_vfwmul_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwmul.mask.nxv1f16.f16( %0, @@ -268,7 +268,7 @@ define @intrinsic_vfwmul_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwmul.mask.nxv2f16.f16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vfwmul_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwmul.mask.nxv4f16.f16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vfwmul_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwmul.mask.nxv8f16.f16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vfwmul_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv16f16_f16 
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwmul.mask.nxv16f16.f16( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vfwmul_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwmul.mask.nxv1f16( %0, @@ -68,7 +68,7 @@ define @intrinsic_vfwmul_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwmul.mask.nxv2f16( %0, @@ -108,7 +108,7 @@ define @intrinsic_vfwmul_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwmul.mask.nxv4f16( %0, @@ -148,7 +148,7 @@ define @intrinsic_vfwmul_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwmul.mask.nxv8f16( %0, @@ -188,7 +188,7 @@ define @intrinsic_vfwmul_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwmul.mask.nxv16f16( %0, @@ -228,7 +228,7 @@ define @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwmul.mask.nxv1f32( %0, @@ -268,7 +268,7 @@ define @intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwmul.mask.nxv2f32( %0, @@ -308,7 +308,7 @@ define @intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwmul.mask.nxv4f32( %0, @@ -348,7 +348,7 @@ define @intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f32( 
%0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwmul.mask.nxv8f32( %0, @@ -388,7 +388,7 @@ define @intrinsic_vfwmul_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwmul.mask.nxv1f16.f16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vfwmul_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwmul.mask.nxv2f16.f16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vfwmul_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwmul.mask.nxv4f16.f16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vfwmul_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwmul.mask.nxv8f16.f16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vfwmul_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwmul.mask.nxv16f16.f16( %0, @@ -588,7 +588,7 @@ define @intrinsic_vfwmul_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwmul.mask.nxv1f32.f32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vfwmul_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwmul.mask.nxv2f32.f32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vfwmul_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwmul.mask.nxv4f32.f32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vfwmul_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vfwmul_mask_vf_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwmul.mask.nxv8f32.f32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfwnmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -33,7 +33,7 @@ define @intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfwnmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -56,7 +56,7 @@ define @intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfwnmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -79,7 +79,7 @@ define @intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfwnmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -102,7 +102,7 @@ define @intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfwnmacc.vv v16, v18, v19 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -125,7 +125,7 @@ define @intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfwnmacc.vv v16, v18, v19, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -148,7 +148,7 @@ define @intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfwnmacc.vv v16, v20, v22 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -171,7 +171,7 @@ define @intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfwnmacc.vv v16, v20, v22, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -197,7 +197,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: vle16.v v8, 
(a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vfwnmacc.vv v16, v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -223,7 +223,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vfwnmacc.vv v16, v8, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -247,7 +247,7 @@ ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfwnmacc.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -271,7 +271,7 @@ ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfwnmacc.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -295,7 +295,7 @@ ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfwnmacc.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -319,7 +319,7 @@ ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfwnmacc.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -343,7 +343,7 @@ ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfwnmacc.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -367,7 +367,7 @@ ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfwnmacc.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -391,7 +391,7 @@ ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfwnmacc.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -415,7 +415,7 @@ ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfwnmacc.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -441,7 +441,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vfwnmacc.vf v16, ft0, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -467,7 +467,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vfwnmacc.vf v16, ft0, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfwnmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -33,7 +33,7 @@ define @intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfwnmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -56,7 +56,7 @@ define @intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfwnmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -79,7 +79,7 @@ define @intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfwnmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -102,7 +102,7 @@ define @intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfwnmacc.vv v16, v18, v19 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -125,7 +125,7 @@ define @intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfwnmacc.vv v16, v18, v19, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -148,7 +148,7 @@ define @intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfwnmacc.vv v16, v20, v22 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -171,7 +171,7 @@ define @intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfwnmacc.vv v16, v20, v22, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -197,7 +197,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vfwnmacc.vv v16, v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -223,7 +223,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) 
; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vfwnmacc.vv v16, v8, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -246,7 +246,7 @@ define @intrinsic_vfwnmacc_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfwnmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -269,7 +269,7 @@ define @intrinsic_vfwnmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfwnmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -292,7 +292,7 @@ define @intrinsic_vfwnmacc_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfwnmacc.vv v16, v18, v19 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -315,7 +315,7 @@ define @intrinsic_vfwnmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfwnmacc.vv v16, v18, v19, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -338,7 +338,7 @@ define @intrinsic_vfwnmacc_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfwnmacc.vv v16, v20, v22 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -361,7 +361,7 @@ define @intrinsic_vfwnmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfwnmacc.vv v16, v20, v22, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -387,7 +387,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a1) ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e32,m4,tu,mu ; CHECK-NEXT: vfwnmacc.vv v16, v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -413,7 +413,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a1) ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e32,m4,tu,mu ; CHECK-NEXT: vfwnmacc.vv v16, v8, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -437,7 +437,7 @@ ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfwnmacc.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -461,7 +461,7 @@ ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, 
e16,mf4,tu,mu ; CHECK-NEXT: vfwnmacc.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -485,7 +485,7 @@ ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfwnmacc.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -509,7 +509,7 @@ ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfwnmacc.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -533,7 +533,7 @@ ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfwnmacc.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -557,7 +557,7 @@ ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfwnmacc.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -581,7 +581,7 @@ ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfwnmacc.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -605,7 +605,7 @@ ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfwnmacc.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -631,7 +631,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vfwnmacc.vf v16, ft0, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -657,7 +657,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vfwnmacc.vf v16, ft0, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -681,7 +681,7 @@ ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfwnmacc.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -705,7 +705,7 @@ ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfwnmacc.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -729,7 +729,7 @@ ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfwnmacc.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -753,7 +753,7 @@ ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # 
%entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfwnmacc.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -777,7 +777,7 @@ ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfwnmacc.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -801,7 +801,7 @@ ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfwnmacc.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -827,7 +827,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a1) ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e32,m4,tu,mu ; CHECK-NEXT: vfwnmacc.vf v16, ft0, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -853,7 +853,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a1) ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e32,m4,tu,mu ; CHECK-NEXT: vfwnmacc.vf v16, ft0, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfwnmsac.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -33,7 +33,7 @@ define @intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfwnmsac.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -56,7 +56,7 @@ define @intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfwnmsac.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -79,7 +79,7 @@ define @intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfwnmsac.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -102,7 +102,7 @@ define @intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfwnmsac.vv v16, v18, v19 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -125,7 +125,7 @@ define @intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, 
%2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfwnmsac.vv v16, v18, v19, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -148,7 +148,7 @@ define @intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfwnmsac.vv v16, v20, v22 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -171,7 +171,7 @@ define @intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfwnmsac.vv v16, v20, v22, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -197,7 +197,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vfwnmsac.vv v16, v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -223,7 +223,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vfwnmsac.vv v16, v8, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -247,7 +247,7 @@ ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfwnmsac.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -271,7 +271,7 @@ ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfwnmsac.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -295,7 +295,7 @@ ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfwnmsac.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -319,7 +319,7 @@ ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfwnmsac.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -343,7 +343,7 @@ ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfwnmsac.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -367,7 +367,7 @@ ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfwnmsac.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -391,7 +391,7 @@ ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16: ; 
CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfwnmsac.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -415,7 +415,7 @@ ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfwnmsac.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -441,7 +441,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vfwnmsac.vf v16, ft0, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -467,7 +467,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vfwnmsac.vf v16, ft0, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfwnmsac.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -33,7 +33,7 @@ define @intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vfwnmsac.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -56,7 +56,7 @@ define @intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfwnmsac.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -79,7 +79,7 @@ define @intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vfwnmsac.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -102,7 +102,7 @@ define @intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfwnmsac.vv v16, v18, v19 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -125,7 +125,7 @@ define @intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vfwnmsac.vv v16, v18, v19, v0.t ; CHECK-NEXT: jalr zero, 0(ra) 
entry: @@ -148,7 +148,7 @@ define @intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfwnmsac.vv v16, v20, v22 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -171,7 +171,7 @@ define @intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vfwnmsac.vv v16, v20, v22, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -197,7 +197,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vfwnmsac.vv v16, v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -223,7 +223,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vfwnmsac.vv v16, v8, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -246,7 +246,7 @@ define @intrinsic_vfwnmsac_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfwnmsac.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -269,7 +269,7 @@ define @intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vfwnmsac.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -292,7 +292,7 @@ define @intrinsic_vfwnmsac_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfwnmsac.vv v16, v18, v19 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -315,7 +315,7 @@ define @intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vfwnmsac.vv v16, v18, v19, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -338,7 +338,7 @@ define @intrinsic_vfwnmsac_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfwnmsac.vv v16, v20, v22 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -361,7 +361,7 @@ define @intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vfwnmsac.vv v16, v20, v22, v0.t ; CHECK-NEXT: jalr 
zero, 0(ra) entry: @@ -387,7 +387,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a1) ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e32,m4,tu,mu ; CHECK-NEXT: vfwnmsac.vv v16, v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -413,7 +413,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a1) ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e32,m4,tu,mu ; CHECK-NEXT: vfwnmsac.vv v16, v8, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -437,7 +437,7 @@ ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfwnmsac.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -461,7 +461,7 @@ ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu ; CHECK-NEXT: vfwnmsac.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -485,7 +485,7 @@ ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfwnmsac.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -509,7 +509,7 @@ ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu ; CHECK-NEXT: vfwnmsac.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -533,7 +533,7 @@ ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfwnmsac.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -557,7 +557,7 @@ ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu ; CHECK-NEXT: vfwnmsac.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -581,7 +581,7 @@ ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfwnmsac.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -605,7 +605,7 @@ ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu ; CHECK-NEXT: vfwnmsac.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -631,7 +631,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vfwnmsac.vf v16, ft0, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -657,7 +657,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: 
vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vfwnmsac.vf v16, ft0, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -681,7 +681,7 @@ ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfwnmsac.vf v16, ft0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -705,7 +705,7 @@ ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu ; CHECK-NEXT: vfwnmsac.vf v16, ft0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -729,7 +729,7 @@ ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfwnmsac.vf v16, ft0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -753,7 +753,7 @@ ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu ; CHECK-NEXT: vfwnmsac.vf v16, ft0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -777,7 +777,7 @@ ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfwnmsac.vf v16, ft0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -801,7 +801,7 @@ ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu ; CHECK-NEXT: vfwnmsac.vf v16, ft0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -827,7 +827,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a1) ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e32,m4,tu,mu ; CHECK-NEXT: vfwnmsac.vf v16, ft0, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -853,7 +853,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a1) ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e32,m4,tu,mu ; CHECK-NEXT: vfwnmsac.vf v16, ft0, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vfwsub_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.mask.nxv1f16( %0, @@ -68,7 +68,7 @@ define @intrinsic_vfwsub_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.mask.nxv2f16( %0, @@ -108,7 +108,7 @@ define 
@intrinsic_vfwsub_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.mask.nxv4f16( %0, @@ -148,7 +148,7 @@ define @intrinsic_vfwsub_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.mask.nxv8f16( %0, @@ -188,7 +188,7 @@ define @intrinsic_vfwsub_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.mask.nxv16f16( %0, @@ -228,7 +228,7 @@ define @intrinsic_vfwsub_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.mask.nxv1f16.f16( %0, @@ -268,7 +268,7 @@ define @intrinsic_vfwsub_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.mask.nxv2f16.f16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vfwsub_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.mask.nxv4f16.f16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vfwsub_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.mask.nxv8f16.f16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vfwsub_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.mask.nxv16f16.f16( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vfwsub_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, 
{{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.mask.nxv1f16( %0, @@ -68,7 +68,7 @@ define @intrinsic_vfwsub_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.mask.nxv2f16( %0, @@ -108,7 +108,7 @@ define @intrinsic_vfwsub_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.mask.nxv4f16( %0, @@ -148,7 +148,7 @@ define @intrinsic_vfwsub_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.mask.nxv8f16( %0, @@ -188,7 +188,7 @@ define @intrinsic_vfwsub_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.mask.nxv16f16( %0, @@ -228,7 +228,7 @@ define @intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.mask.nxv1f32( %0, @@ -268,7 +268,7 @@ define @intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.mask.nxv2f32( %0, @@ -308,7 +308,7 @@ define @intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.mask.nxv4f32( %0, @@ -348,7 +348,7 @@ define @intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.mask.nxv8f32( %0, @@ -388,7 +388,7 @@ define @intrinsic_vfwsub_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.mask.nxv1f16.f16( %0, @@ -428,7 +428,7 @@ define 
@intrinsic_vfwsub_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.mask.nxv2f16.f16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vfwsub_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.mask.nxv4f16.f16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vfwsub_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.mask.nxv8f16.f16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vfwsub_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.mask.nxv16f16.f16( %0, @@ -588,7 +588,7 @@ define @intrinsic_vfwsub_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.mask.nxv1f32.f32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vfwsub_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.mask.nxv2f32.f32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vfwsub_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.mask.nxv4f32.f32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vfwsub_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.mask.nxv8f32.f32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: 
vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv1f16( %0, @@ -68,7 +68,7 @@ define @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv2f16( %0, @@ -108,7 +108,7 @@ define @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv4f16( %0, @@ -148,7 +148,7 @@ define @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv8f16( %0, @@ -188,7 +188,7 @@ define @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv16f16( %0, @@ -228,7 +228,7 @@ define @intrinsic_vfwsub.w_mask_wf_nxv1f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f32_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv1f32.f16( %0, @@ -268,7 +268,7 @@ define @intrinsic_vfwsub.w_mask_wf_nxv2f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f32_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv2f32.f16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vfwsub.w_mask_wf_nxv4f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f32_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv4f32.f16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vfwsub.w_mask_wf_nxv8f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f32_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv8f32.f16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vfwsub.w_mask_wf_nxv16f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv16f32_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, 
{{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv16f32.f16( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv1f16( %0, @@ -68,7 +68,7 @@ define @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv2f16( %0, @@ -108,7 +108,7 @@ define @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv4f16( %0, @@ -148,7 +148,7 @@ define @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv8f16( %0, @@ -188,7 +188,7 @@ define @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv16f16( %0, @@ -228,7 +228,7 @@ define @intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv1f32( %0, @@ -268,7 +268,7 @@ define @intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv2f32( %0, @@ -308,7 +308,7 @@ define @intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv4f32( %0, @@ -348,7 +348,7 @@ define @intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv8f32( %0, @@ -388,7 +388,7 @@ define @intrinsic_vfwsub.w_mask_wf_nxv1f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f32_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv1f32.f16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vfwsub.w_mask_wf_nxv2f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f32_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv2f32.f16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vfwsub.w_mask_wf_nxv4f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f32_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv4f32.f16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vfwsub.w_mask_wf_nxv8f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f32_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv8f32.f16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vfwsub.w_mask_wf_nxv16f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv16f32_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv16f32.f16( %0, @@ -588,7 +588,7 @@ define @intrinsic_vfwsub.w_mask_wf_nxv1f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f64_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vfwsub.w_mask_wf_nxv2f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f64_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv2f64.f32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vfwsub.w_mask_wf_nxv4f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f64_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv4f64.f32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vfwsub.w_mask_wf_nxv8f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vfwsub.w_mask_wf_nxv8f64_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vfwsub.w.mask.nxv8f64.f32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vid-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vid-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vid-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vid-rv32.ll @@ -22,7 +22,7 @@ define @intrinsic_vid_mask_v_nxv1i8( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i8 -; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, a0, e8,mf8,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv1i8( %0, @@ -54,7 +54,7 @@ define @intrinsic_vid_mask_v_nxv2i8( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i8 -; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, a0, e8,mf4,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv2i8( %0, @@ -86,7 +86,7 @@ define @intrinsic_vid_mask_v_nxv4i8( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i8 -; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, a0, e8,mf2,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv4i8( %0, @@ -118,7 +118,7 @@ define @intrinsic_vid_mask_v_nxv8i8( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i8 -; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, a0, e8,m1,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv8i8( %0, @@ -150,7 +150,7 @@ define @intrinsic_vid_mask_v_nxv16i8( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i8 -; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, a0, e8,m2,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv16i8( %0, @@ -182,7 +182,7 @@ define @intrinsic_vid_mask_v_nxv32i8( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i8 -; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, a0, e8,m4,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv32i8( %0, @@ -214,7 +214,7 @@ define @intrinsic_vid_mask_v_nxv1i16( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i16 -; CHECK: vsetvli {{.*}}, a0, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, a0, e16,mf4,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv1i16( %0, @@ -246,7 +246,7 @@ define @intrinsic_vid_mask_v_nxv2i16( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i16 -; CHECK: vsetvli {{.*}}, a0, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, a0, e16,mf2,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv2i16( %0, @@ -278,7 +278,7 @@ define @intrinsic_vid_mask_v_nxv4i16( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i16 -; CHECK: vsetvli {{.*}}, a0, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, a0, e16,m1,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv4i16( %0, @@ -310,7 +310,7 @@ define @intrinsic_vid_mask_v_nxv8i16( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i16 -; CHECK: vsetvli {{.*}}, a0, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, a0, e16,m2,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv8i16( %0, @@ -342,7 +342,7 @@ define @intrinsic_vid_mask_v_nxv16i16( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i16 
-; CHECK: vsetvli {{.*}}, a0, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, a0, e16,m4,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv16i16( %0, @@ -374,7 +374,7 @@ define @intrinsic_vid_mask_v_nxv32i16( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i16 -; CHECK: vsetvli {{.*}}, a0, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, a0, e16,m8,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv32i16( %0, @@ -406,7 +406,7 @@ define @intrinsic_vid_mask_v_nxv1i32( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i32 -; CHECK: vsetvli {{.*}}, a0, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, a0, e32,mf2,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv1i32( %0, @@ -438,7 +438,7 @@ define @intrinsic_vid_mask_v_nxv2i32( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i32 -; CHECK: vsetvli {{.*}}, a0, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, a0, e32,m1,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv2i32( %0, @@ -470,7 +470,7 @@ define @intrinsic_vid_mask_v_nxv4i32( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i32 -; CHECK: vsetvli {{.*}}, a0, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, a0, e32,m2,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv4i32( %0, @@ -502,7 +502,7 @@ define @intrinsic_vid_mask_v_nxv8i32( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i32 -; CHECK: vsetvli {{.*}}, a0, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, a0, e32,m4,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv8i32( %0, @@ -534,7 +534,7 @@ define @intrinsic_vid_mask_v_nxv16i32( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i32 -; CHECK: vsetvli {{.*}}, a0, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, a0, e32,m8,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv16i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vid-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vid-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vid-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vid-rv64.ll @@ -22,7 +22,7 @@ define @intrinsic_vid_mask_v_nxv1i8( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i8 -; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, a0, e8,mf8,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv1i8( %0, @@ -54,7 +54,7 @@ define @intrinsic_vid_mask_v_nxv2i8( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i8 -; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, a0, e8,mf4,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv2i8( %0, @@ -86,7 +86,7 @@ define @intrinsic_vid_mask_v_nxv4i8( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i8 -; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, a0, e8,mf2,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv4i8( %0, @@ -118,7 +118,7 @@ define @intrinsic_vid_mask_v_nxv8i8( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i8 -; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, a0, e8,m1,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv8i8( %0, @@ -150,7 +150,7 @@ define @intrinsic_vid_mask_v_nxv16i8( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i8 -; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, a0, e8,m2,tu,mu ; 
CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv16i8( %0, @@ -182,7 +182,7 @@ define @intrinsic_vid_mask_v_nxv32i8( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i8 -; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, a0, e8,m4,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv32i8( %0, @@ -214,7 +214,7 @@ define @intrinsic_vid_mask_v_nxv1i16( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i16 -; CHECK: vsetvli {{.*}}, a0, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, a0, e16,mf4,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv1i16( %0, @@ -246,7 +246,7 @@ define @intrinsic_vid_mask_v_nxv2i16( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i16 -; CHECK: vsetvli {{.*}}, a0, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, a0, e16,mf2,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv2i16( %0, @@ -278,7 +278,7 @@ define @intrinsic_vid_mask_v_nxv4i16( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i16 -; CHECK: vsetvli {{.*}}, a0, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, a0, e16,m1,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv4i16( %0, @@ -310,7 +310,7 @@ define @intrinsic_vid_mask_v_nxv8i16( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i16 -; CHECK: vsetvli {{.*}}, a0, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, a0, e16,m2,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv8i16( %0, @@ -342,7 +342,7 @@ define @intrinsic_vid_mask_v_nxv16i16( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i16 -; CHECK: vsetvli {{.*}}, a0, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, a0, e16,m4,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv16i16( %0, @@ -374,7 +374,7 @@ define @intrinsic_vid_mask_v_nxv32i16( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i16 -; CHECK: vsetvli {{.*}}, a0, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, a0, e16,m8,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv32i16( %0, @@ -406,7 +406,7 @@ define @intrinsic_vid_mask_v_nxv1i32( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i32 -; CHECK: vsetvli {{.*}}, a0, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, a0, e32,mf2,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv1i32( %0, @@ -438,7 +438,7 @@ define @intrinsic_vid_mask_v_nxv2i32( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i32 -; CHECK: vsetvli {{.*}}, a0, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, a0, e32,m1,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv2i32( %0, @@ -470,7 +470,7 @@ define @intrinsic_vid_mask_v_nxv4i32( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i32 -; CHECK: vsetvli {{.*}}, a0, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, a0, e32,m2,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv4i32( %0, @@ -502,7 +502,7 @@ define @intrinsic_vid_mask_v_nxv8i32( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i32 -; CHECK: vsetvli {{.*}}, a0, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, a0, e32,m4,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv8i32( %0, @@ -534,7 +534,7 @@ define @intrinsic_vid_mask_v_nxv16i32( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i32 -; CHECK: vsetvli {{.*}}, a0, e32,m8,ta,mu 
+; CHECK: vsetvli {{.*}}, a0, e32,m8,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv16i32( %0, @@ -566,7 +566,7 @@ define @intrinsic_vid_mask_v_nxv1i64( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i64 -; CHECK: vsetvli {{.*}}, a0, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, a0, e64,m1,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv1i64( %0, @@ -598,7 +598,7 @@ define @intrinsic_vid_mask_v_nxv2i64( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i64 -; CHECK: vsetvli {{.*}}, a0, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, a0, e64,m2,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv2i64( %0, @@ -630,7 +630,7 @@ define @intrinsic_vid_mask_v_nxv4i64( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i64 -; CHECK: vsetvli {{.*}}, a0, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, a0, e64,m4,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv4i64( %0, @@ -662,7 +662,7 @@ define @intrinsic_vid_mask_v_nxv8i64( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i64 -; CHECK: vsetvli {{.*}}, a0, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, a0, e64,m8,tu,mu ; CHECK: vid.v {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vid.mask.nxv8i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/viota-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/viota-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/viota-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/viota-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_viota_mask_m_nxv1i8_nxv1i1( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -68,7 +68,7 @@ define @intrinsic_viota_mask_m_nxv2i8_nxv2i1( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i8_nxv2i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -108,7 +108,7 @@ define @intrinsic_viota_mask_m_nxv4i8_nxv4i1( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i8_nxv4i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -148,7 +148,7 @@ define @intrinsic_viota_mask_m_nxv8i8_nxv8i1( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i8_nxv8i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -188,7 +188,7 @@ define @intrinsic_viota_mask_m_nxv16i8_nxv16i1( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i8_nxv16i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -228,7 +228,7 @@ define @intrinsic_viota_mask_m_nxv32i8_nxv32i1( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv32i8_nxv32i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -268,7 +268,7 @@ define 
@intrinsic_viota_mask_m_nxv64i8_nxv64i1( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv64i8_nxv64i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m8,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -308,7 +308,7 @@ define @intrinsic_viota_mask_m_nxv1i16_nxv1i1( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i16_nxv1i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -348,7 +348,7 @@ define @intrinsic_viota_mask_m_nxv2i16_nxv2i1( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i16_nxv2i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -388,7 +388,7 @@ define @intrinsic_viota_mask_m_nxv4i16_nxv4i1( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i16_nxv4i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -428,7 +428,7 @@ define @intrinsic_viota_mask_m_nxv8i16_nxv8i1( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i16_nxv8i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -468,7 +468,7 @@ define @intrinsic_viota_mask_m_nxv16i16_nxv16i1( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i16_nxv16i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -508,7 +508,7 @@ define @intrinsic_viota_mask_m_nxv32i16_nxv32i1( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv32i16_nxv32i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m8,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -548,7 +548,7 @@ define @intrinsic_viota_mask_m_nxv1i32_nxv1i1( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i32_nxv1i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -588,7 +588,7 @@ define @intrinsic_viota_mask_m_nxv2i32_nxv2i1( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i32_nxv2i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -628,7 +628,7 @@ define @intrinsic_viota_mask_m_nxv4i32_nxv4i1( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i32_nxv4i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -668,7 +668,7 @@ define @intrinsic_viota_mask_m_nxv8i32_nxv8i1( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i32_nxv8i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -708,7 +708,7 @@ define @intrinsic_viota_mask_m_nxv16i32_nxv16i1( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i32_nxv16i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m8,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/viota-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/viota-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/viota-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/viota-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_viota_mask_m_nxv1i8_nxv1i1( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -68,7 +68,7 @@ define @intrinsic_viota_mask_m_nxv2i8_nxv2i1( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i8_nxv2i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -108,7 +108,7 @@ define @intrinsic_viota_mask_m_nxv4i8_nxv4i1( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i8_nxv4i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -148,7 +148,7 @@ define @intrinsic_viota_mask_m_nxv8i8_nxv8i1( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i8_nxv8i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -188,7 +188,7 @@ define @intrinsic_viota_mask_m_nxv16i8_nxv16i1( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i8_nxv16i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -228,7 +228,7 @@ define @intrinsic_viota_mask_m_nxv32i8_nxv32i1( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv32i8_nxv32i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -268,7 +268,7 @@ define @intrinsic_viota_mask_m_nxv64i8_nxv64i1( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv64i8_nxv64i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m8,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -308,7 +308,7 @@ define @intrinsic_viota_mask_m_nxv1i16_nxv1i1( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i16_nxv1i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -348,7 +348,7 @@ define @intrinsic_viota_mask_m_nxv2i16_nxv2i1( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i16_nxv2i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -388,7 +388,7 @@ define @intrinsic_viota_mask_m_nxv4i16_nxv4i1( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i16_nxv4i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -428,7 +428,7 @@ define @intrinsic_viota_mask_m_nxv8i16_nxv8i1( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i16_nxv8i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -468,7 +468,7 @@ define @intrinsic_viota_mask_m_nxv16i16_nxv16i1( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i16_nxv16i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -508,7 +508,7 @@ define @intrinsic_viota_mask_m_nxv32i16_nxv32i1( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv32i16_nxv32i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m8,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -548,7 +548,7 @@ define @intrinsic_viota_mask_m_nxv1i32_nxv1i1( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i32_nxv1i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -588,7 +588,7 @@ define @intrinsic_viota_mask_m_nxv2i32_nxv2i1( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i32_nxv2i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -628,7 +628,7 @@ define @intrinsic_viota_mask_m_nxv4i32_nxv4i1( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i32_nxv4i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -668,7 +668,7 @@ define @intrinsic_viota_mask_m_nxv8i32_nxv8i1( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i32_nxv8i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -708,7 +708,7 @@ define @intrinsic_viota_mask_m_nxv16i32_nxv16i1( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i32_nxv16i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m8,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -748,7 +748,7 @@ define @intrinsic_viota_mask_m_nxv1i64_nxv1i1( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i64_nxv1i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -788,7 +788,7 @@ 
define @intrinsic_viota_mask_m_nxv2i64_nxv2i1( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i64_nxv2i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -828,7 +828,7 @@ define @intrinsic_viota_mask_m_nxv4i64_nxv4i1( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i64_nxv4i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -868,7 +868,7 @@ define @intrinsic_viota_mask_m_nxv8i64_nxv8i1( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i64_nxv8i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m8,tu,mu ; CHECK-NEXT: viota.m v16, v0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll @@ -26,7 +26,7 @@ define @intrinsic_vle_mask_v_nxv1i32_nxv1i32( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv1i32( %0, @@ -62,7 +62,7 @@ define @intrinsic_vle_mask_v_nxv2i32_nxv2i32( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv2i32( %0, @@ -98,7 +98,7 @@ define @intrinsic_vle_mask_v_nxv4i32_nxv4i32( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv4i32( %0, @@ -134,7 +134,7 @@ define @intrinsic_vle_mask_v_nxv8i32_nxv8i32( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv8i32( %0, @@ -170,7 +170,7 @@ define @intrinsic_vle_mask_v_nxv16i32_nxv16i32( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv16i32( %0, @@ -206,7 +206,7 @@ define @intrinsic_vle_mask_v_nxv1f32_nxv1f32( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv1f32( %0, @@ -242,7 +242,7 @@ define @intrinsic_vle_mask_v_nxv2f32_nxv2f32( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vle32.v {{v[0-9]+}}, 
(a0), v0.t %a = call @llvm.riscv.vle.mask.nxv2f32( %0, @@ -278,7 +278,7 @@ define @intrinsic_vle_mask_v_nxv4f32_nxv4f32( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv4f32( %0, @@ -314,7 +314,7 @@ define @intrinsic_vle_mask_v_nxv8f32_nxv8f32( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv8f32( %0, @@ -350,7 +350,7 @@ define @intrinsic_vle_mask_v_nxv16f32_nxv16f32( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16f32_nxv16f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv16f32( %0, @@ -386,7 +386,7 @@ define @intrinsic_vle_mask_v_nxv1i16_nxv1i16( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv1i16( %0, @@ -422,7 +422,7 @@ define @intrinsic_vle_mask_v_nxv2i16_nxv2i16( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv2i16( %0, @@ -458,7 +458,7 @@ define @intrinsic_vle_mask_v_nxv4i16_nxv4i16( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv4i16( %0, @@ -494,7 +494,7 @@ define @intrinsic_vle_mask_v_nxv8i16_nxv8i16( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv8i16( %0, @@ -530,7 +530,7 @@ define @intrinsic_vle_mask_v_nxv16i16_nxv16i16( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv16i16( %0, @@ -566,7 +566,7 @@ define @intrinsic_vle_mask_v_nxv32i16_nxv32i16( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv32i16( %0, @@ -602,7 +602,7 @@ define @intrinsic_vle_mask_v_nxv1f16_nxv1f16( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv1f16( %0, @@ 
-638,7 +638,7 @@ define @intrinsic_vle_mask_v_nxv2f16_nxv2f16( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv2f16( %0, @@ -674,7 +674,7 @@ define @intrinsic_vle_mask_v_nxv4f16_nxv4f16( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv4f16( %0, @@ -710,7 +710,7 @@ define @intrinsic_vle_mask_v_nxv8f16_nxv8f16( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv8f16( %0, @@ -746,7 +746,7 @@ define @intrinsic_vle_mask_v_nxv16f16_nxv16f16( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv16f16( %0, @@ -782,7 +782,7 @@ define @intrinsic_vle_mask_v_nxv32f16_nxv32f16( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32f16_nxv32f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv32f16( %0, @@ -818,7 +818,7 @@ define @intrinsic_vle_mask_v_nxv1i8_nxv1i8( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vle8.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv1i8( %0, @@ -854,7 +854,7 @@ define @intrinsic_vle_mask_v_nxv2i8_nxv2i8( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vle8.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv2i8( %0, @@ -890,7 +890,7 @@ define @intrinsic_vle_mask_v_nxv4i8_nxv4i8( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vle8.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv4i8( %0, @@ -926,7 +926,7 @@ define @intrinsic_vle_mask_v_nxv8i8_nxv8i8( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vle8.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv8i8( %0, @@ -962,7 +962,7 @@ define @intrinsic_vle_mask_v_nxv16i8_nxv16i8( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vle8.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv16i8( %0, @@ -998,7 +998,7 @@ define @intrinsic_vle_mask_v_nxv32i8_nxv32i8( %0, * %1, %2, i32 %3) nounwind { 
entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vle8.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv32i8( %0, @@ -1034,7 +1034,7 @@ define @intrinsic_vle_mask_v_nxv64i8_nxv64i8( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vle8.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv64i8( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll @@ -26,7 +26,7 @@ define @intrinsic_vle_mask_v_nxv1i64_nxv1i64( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vle64.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv1i64( %0, @@ -62,7 +62,7 @@ define @intrinsic_vle_mask_v_nxv2i64_nxv2i64( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vle64.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv2i64( %0, @@ -98,7 +98,7 @@ define @intrinsic_vle_mask_v_nxv4i64_nxv4i64( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vle64.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv4i64( %0, @@ -134,7 +134,7 @@ define @intrinsic_vle_mask_v_nxv8i64_nxv8i64( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vle64.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv8i64( %0, @@ -170,7 +170,7 @@ define @intrinsic_vle_mask_v_nxv1f64_nxv1f64( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f64_nxv1f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vle64.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv1f64( %0, @@ -206,7 +206,7 @@ define @intrinsic_vle_mask_v_nxv2f64_nxv2f64( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f64_nxv2f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vle64.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv2f64( %0, @@ -242,7 +242,7 @@ define @intrinsic_vle_mask_v_nxv4f64_nxv4f64( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f64_nxv4f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vle64.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv4f64( %0, @@ -278,7 +278,7 @@ define @intrinsic_vle_mask_v_nxv8f64_nxv8f64( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f64_nxv8f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vle64.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv8f64( %0, @@ 
-314,7 +314,7 @@ define @intrinsic_vle_mask_v_nxv1i32_nxv1i32( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv1i32( %0, @@ -350,7 +350,7 @@ define @intrinsic_vle_mask_v_nxv2i32_nxv2i32( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv2i32( %0, @@ -386,7 +386,7 @@ define @intrinsic_vle_mask_v_nxv4i32_nxv4i32( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv4i32( %0, @@ -422,7 +422,7 @@ define @intrinsic_vle_mask_v_nxv8i32_nxv8i32( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv8i32( %0, @@ -458,7 +458,7 @@ define @intrinsic_vle_mask_v_nxv16i32_nxv16i32( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv16i32( %0, @@ -494,7 +494,7 @@ define @intrinsic_vle_mask_v_nxv1f32_nxv1f32( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv1f32( %0, @@ -530,7 +530,7 @@ define @intrinsic_vle_mask_v_nxv2f32_nxv2f32( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv2f32( %0, @@ -566,7 +566,7 @@ define @intrinsic_vle_mask_v_nxv4f32_nxv4f32( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv4f32( %0, @@ -602,7 +602,7 @@ define @intrinsic_vle_mask_v_nxv8f32_nxv8f32( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv8f32( %0, @@ -638,7 +638,7 @@ define @intrinsic_vle_mask_v_nxv16f32_nxv16f32( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16f32_nxv16f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv16f32( %0, @@ -674,7 +674,7 @@ define @intrinsic_vle_mask_v_nxv1i16_nxv1i16( %0, * 
%1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv1i16( %0, @@ -710,7 +710,7 @@ define @intrinsic_vle_mask_v_nxv2i16_nxv2i16( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv2i16( %0, @@ -746,7 +746,7 @@ define @intrinsic_vle_mask_v_nxv4i16_nxv4i16( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv4i16( %0, @@ -782,7 +782,7 @@ define @intrinsic_vle_mask_v_nxv8i16_nxv8i16( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv8i16( %0, @@ -818,7 +818,7 @@ define @intrinsic_vle_mask_v_nxv16i16_nxv16i16( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv16i16( %0, @@ -854,7 +854,7 @@ define @intrinsic_vle_mask_v_nxv32i16_nxv32i16( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv32i16( %0, @@ -890,7 +890,7 @@ define @intrinsic_vle_mask_v_nxv1f16_nxv1f16( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv1f16( %0, @@ -926,7 +926,7 @@ define @intrinsic_vle_mask_v_nxv2f16_nxv2f16( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv2f16( %0, @@ -962,7 +962,7 @@ define @intrinsic_vle_mask_v_nxv4f16_nxv4f16( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv4f16( %0, @@ -998,7 +998,7 @@ define @intrinsic_vle_mask_v_nxv8f16_nxv8f16( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv8f16( %0, @@ -1034,7 +1034,7 @@ define @intrinsic_vle_mask_v_nxv16f16_nxv16f16( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vle_mask_v_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv16f16( %0, @@ -1070,7 +1070,7 @@ define @intrinsic_vle_mask_v_nxv32f16_nxv32f16( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32f16_nxv32f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv32f16( %0, @@ -1106,7 +1106,7 @@ define @intrinsic_vle_mask_v_nxv1i8_nxv1i8( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vle8.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv1i8( %0, @@ -1142,7 +1142,7 @@ define @intrinsic_vle_mask_v_nxv2i8_nxv2i8( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vle8.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv2i8( %0, @@ -1178,7 +1178,7 @@ define @intrinsic_vle_mask_v_nxv4i8_nxv4i8( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vle8.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv4i8( %0, @@ -1214,7 +1214,7 @@ define @intrinsic_vle_mask_v_nxv8i8_nxv8i8( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vle8.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv8i8( %0, @@ -1250,7 +1250,7 @@ define @intrinsic_vle_mask_v_nxv16i8_nxv16i8( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vle8.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv16i8( %0, @@ -1286,7 +1286,7 @@ define @intrinsic_vle_mask_v_nxv32i8_nxv32i8( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vle8.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv32i8( %0, @@ -1322,7 +1322,7 @@ define @intrinsic_vle_mask_v_nxv64i8_nxv64i8( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vle_mask_v_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vle8.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vle.mask.nxv64i8( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll @@ -25,7 +25,7 @@ define @intrinsic_vleff_mask_v_nxv1i32_nxv1i32( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv1i32( %0, @@ -61,7 
+61,7 @@ define @intrinsic_vleff_mask_v_nxv2i32_nxv2i32( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv2i32( %0, @@ -97,7 +97,7 @@ define @intrinsic_vleff_mask_v_nxv4i32_nxv4i32( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv4i32( %0, @@ -133,7 +133,7 @@ define @intrinsic_vleff_mask_v_nxv8i32_nxv8i32( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv8i32( %0, @@ -169,7 +169,7 @@ define @intrinsic_vleff_mask_v_nxv16i32_nxv16i32( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv16i32( %0, @@ -205,7 +205,7 @@ define @intrinsic_vleff_mask_v_nxv1f32_nxv1f32( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv1f32( %0, @@ -241,7 +241,7 @@ define @intrinsic_vleff_mask_v_nxv2f32_nxv2f32( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv2f32( %0, @@ -277,7 +277,7 @@ define @intrinsic_vleff_mask_v_nxv4f32_nxv4f32( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv4f32( %0, @@ -313,7 +313,7 @@ define @intrinsic_vleff_mask_v_nxv8f32_nxv8f32( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv8f32( %0, @@ -349,7 +349,7 @@ define @intrinsic_vleff_mask_v_nxv16f32_nxv16f32( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16f32_nxv16f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv16f32( %0, @@ -385,7 +385,7 @@ define @intrinsic_vleff_mask_v_nxv1i16_nxv1i16( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv1i16( %0, @@ 
-421,7 +421,7 @@ define @intrinsic_vleff_mask_v_nxv2i16_nxv2i16( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv2i16( %0, @@ -457,7 +457,7 @@ define @intrinsic_vleff_mask_v_nxv4i16_nxv4i16( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv4i16( %0, @@ -493,7 +493,7 @@ define @intrinsic_vleff_mask_v_nxv8i16_nxv8i16( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv8i16( %0, @@ -529,7 +529,7 @@ define @intrinsic_vleff_mask_v_nxv16i16_nxv16i16( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv16i16( %0, @@ -565,7 +565,7 @@ define @intrinsic_vleff_mask_v_nxv32i16_nxv32i16( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv32i16( %0, @@ -601,7 +601,7 @@ define @intrinsic_vleff_mask_v_nxv1f16_nxv1f16( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv1f16( %0, @@ -637,7 +637,7 @@ define @intrinsic_vleff_mask_v_nxv2f16_nxv2f16( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv2f16( %0, @@ -673,7 +673,7 @@ define @intrinsic_vleff_mask_v_nxv4f16_nxv4f16( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv4f16( %0, @@ -709,7 +709,7 @@ define @intrinsic_vleff_mask_v_nxv8f16_nxv8f16( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv8f16( %0, @@ -745,7 +745,7 @@ define @intrinsic_vleff_mask_v_nxv16f16_nxv16f16( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t %a = call 
@llvm.riscv.vleff.mask.nxv16f16( %0, @@ -781,7 +781,7 @@ define @intrinsic_vleff_mask_v_nxv32f16_nxv32f16( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32f16_nxv32f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv32f16( %0, @@ -817,7 +817,7 @@ define @intrinsic_vleff_mask_v_nxv1i8_nxv1i8( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv1i8( %0, @@ -853,7 +853,7 @@ define @intrinsic_vleff_mask_v_nxv2i8_nxv2i8( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv2i8( %0, @@ -889,7 +889,7 @@ define @intrinsic_vleff_mask_v_nxv4i8_nxv4i8( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv4i8( %0, @@ -925,7 +925,7 @@ define @intrinsic_vleff_mask_v_nxv8i8_nxv8i8( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv8i8( %0, @@ -961,7 +961,7 @@ define @intrinsic_vleff_mask_v_nxv16i8_nxv16i8( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv16i8( %0, @@ -997,7 +997,7 @@ define @intrinsic_vleff_mask_v_nxv32i8_nxv32i8( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv32i8( %0, @@ -1033,7 +1033,7 @@ define @intrinsic_vleff_mask_v_nxv64i8_nxv64i8( %0, * %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv64i8( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll @@ -25,7 +25,7 @@ define @intrinsic_vleff_mask_v_nxv1i64_nxv1i64( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vle64ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv1i64( %0, @@ -61,7 +61,7 @@ define @intrinsic_vleff_mask_v_nxv2i64_nxv2i64( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vleff_mask_v_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vle64ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv2i64( %0, @@ -97,7 +97,7 @@ define @intrinsic_vleff_mask_v_nxv4i64_nxv4i64( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vle64ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv4i64( %0, @@ -133,7 +133,7 @@ define @intrinsic_vleff_mask_v_nxv8i64_nxv8i64( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vle64ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv8i64( %0, @@ -169,7 +169,7 @@ define @intrinsic_vleff_mask_v_nxv1f64_nxv1f64( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f64_nxv1f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vle64ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv1f64( %0, @@ -205,7 +205,7 @@ define @intrinsic_vleff_mask_v_nxv2f64_nxv2f64( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f64_nxv2f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vle64ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv2f64( %0, @@ -241,7 +241,7 @@ define @intrinsic_vleff_mask_v_nxv4f64_nxv4f64( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f64_nxv4f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vle64ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv4f64( %0, @@ -277,7 +277,7 @@ define @intrinsic_vleff_mask_v_nxv8f64_nxv8f64( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f64_nxv8f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vle64ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv8f64( %0, @@ -313,7 +313,7 @@ define @intrinsic_vleff_mask_v_nxv1i32_nxv1i32( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv1i32( %0, @@ -349,7 +349,7 @@ define @intrinsic_vleff_mask_v_nxv2i32_nxv2i32( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv2i32( %0, @@ -385,7 +385,7 @@ define @intrinsic_vleff_mask_v_nxv4i32_nxv4i32( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv4i32( %0, @@ -421,7 +421,7 @@ define @intrinsic_vleff_mask_v_nxv8i32_nxv8i32( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vleff_mask_v_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv8i32( %0, @@ -457,7 +457,7 @@ define @intrinsic_vleff_mask_v_nxv16i32_nxv16i32( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv16i32( %0, @@ -493,7 +493,7 @@ define @intrinsic_vleff_mask_v_nxv1f32_nxv1f32( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv1f32( %0, @@ -529,7 +529,7 @@ define @intrinsic_vleff_mask_v_nxv2f32_nxv2f32( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv2f32( %0, @@ -565,7 +565,7 @@ define @intrinsic_vleff_mask_v_nxv4f32_nxv4f32( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv4f32( %0, @@ -601,7 +601,7 @@ define @intrinsic_vleff_mask_v_nxv8f32_nxv8f32( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv8f32( %0, @@ -637,7 +637,7 @@ define @intrinsic_vleff_mask_v_nxv16f32_nxv16f32( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16f32_nxv16f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv16f32( %0, @@ -673,7 +673,7 @@ define @intrinsic_vleff_mask_v_nxv1i16_nxv1i16( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv1i16( %0, @@ -709,7 +709,7 @@ define @intrinsic_vleff_mask_v_nxv2i16_nxv2i16( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv2i16( %0, @@ -745,7 +745,7 @@ define @intrinsic_vleff_mask_v_nxv4i16_nxv4i16( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv4i16( %0, @@ -781,7 +781,7 @@ define @intrinsic_vleff_mask_v_nxv8i16_nxv8i16( %0, * %1, %2, i64 %3) nounwind { entry: ; 
CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv8i16( %0, @@ -817,7 +817,7 @@ define @intrinsic_vleff_mask_v_nxv16i16_nxv16i16( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv16i16( %0, @@ -853,7 +853,7 @@ define @intrinsic_vleff_mask_v_nxv32i16_nxv32i16( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv32i16( %0, @@ -889,7 +889,7 @@ define @intrinsic_vleff_mask_v_nxv1f16_nxv1f16( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv1f16( %0, @@ -925,7 +925,7 @@ define @intrinsic_vleff_mask_v_nxv2f16_nxv2f16( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv2f16( %0, @@ -961,7 +961,7 @@ define @intrinsic_vleff_mask_v_nxv4f16_nxv4f16( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv4f16( %0, @@ -997,7 +997,7 @@ define @intrinsic_vleff_mask_v_nxv8f16_nxv8f16( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv8f16( %0, @@ -1033,7 +1033,7 @@ define @intrinsic_vleff_mask_v_nxv16f16_nxv16f16( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv16f16( %0, @@ -1069,7 +1069,7 @@ define @intrinsic_vleff_mask_v_nxv32f16_nxv32f16( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32f16_nxv32f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv32f16( %0, @@ -1105,7 +1105,7 @@ define @intrinsic_vleff_mask_v_nxv1i8_nxv1i8( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv1i8( %0, @@ -1141,7 +1141,7 @@ define @intrinsic_vleff_mask_v_nxv2i8_nxv2i8( %0, * %1, %2, i64 %3) 
nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv2i8( %0, @@ -1177,7 +1177,7 @@ define @intrinsic_vleff_mask_v_nxv4i8_nxv4i8( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv4i8( %0, @@ -1213,7 +1213,7 @@ define @intrinsic_vleff_mask_v_nxv8i8_nxv8i8( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv8i8( %0, @@ -1249,7 +1249,7 @@ define @intrinsic_vleff_mask_v_nxv16i8_nxv16i8( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv16i8( %0, @@ -1285,7 +1285,7 @@ define @intrinsic_vleff_mask_v_nxv32i8_nxv32i8( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv32i8( %0, @@ -1321,7 +1321,7 @@ define @intrinsic_vleff_mask_v_nxv64i8_nxv64i8( %0, * %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t %a = call @llvm.riscv.vleff.mask.nxv64i8( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vlse_mask_v_nxv1i32_nxv1i32( %0, * %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, a2, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, a2, e32,mf2,tu,mu ; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv1i32( %0, @@ -68,7 +68,7 @@ define @intrinsic_vlse_mask_v_nxv2i32_nxv2i32( %0, * %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, a2, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, a2, e32,m1,tu,mu ; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv2i32( %0, @@ -108,7 +108,7 @@ define @intrinsic_vlse_mask_v_nxv4i32_nxv4i32( %0, * %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, a2, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, a2, e32,m2,tu,mu ; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv4i32( %0, @@ -148,7 +148,7 @@ define @intrinsic_vlse_mask_v_nxv8i32_nxv8i32( %0, * %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, a2, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, a2, e32,m4,tu,mu ; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t %a = 
call @llvm.riscv.vlse.mask.nxv8i32( %0, @@ -188,7 +188,7 @@ define @intrinsic_vlse_mask_v_nxv16i32_nxv16i32( %0, * %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, a2, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, a2, e32,m8,tu,mu ; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv16i32( %0, @@ -228,7 +228,7 @@ define @intrinsic_vlse_mask_v_nxv1f32_nxv1f32( %0, * %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, a2, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, a2, e32,mf2,tu,mu ; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv1f32( %0, @@ -268,7 +268,7 @@ define @intrinsic_vlse_mask_v_nxv2f32_nxv2f32( %0, * %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, a2, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, a2, e32,m1,tu,mu ; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv2f32( %0, @@ -308,7 +308,7 @@ define @intrinsic_vlse_mask_v_nxv4f32_nxv4f32( %0, * %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, a2, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, a2, e32,m2,tu,mu ; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv4f32( %0, @@ -348,7 +348,7 @@ define @intrinsic_vlse_mask_v_nxv8f32_nxv8f32( %0, * %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, a2, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, a2, e32,m4,tu,mu ; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv8f32( %0, @@ -388,7 +388,7 @@ define @intrinsic_vlse_mask_v_nxv16f32_nxv16f32( %0, * %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f32_nxv16f32 -; CHECK: vsetvli {{.*}}, a2, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, a2, e32,m8,tu,mu ; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv16f32( %0, @@ -428,7 +428,7 @@ define @intrinsic_vlse_mask_v_nxv1i16_nxv1i16( %0, * %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, a2, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, a2, e16,mf4,tu,mu ; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv1i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vlse_mask_v_nxv2i16_nxv2i16( %0, * %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, a2, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, a2, e16,mf2,tu,mu ; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv2i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vlse_mask_v_nxv4i16_nxv4i16( %0, * %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, a2, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, a2, e16,m1,tu,mu ; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv4i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vlse_mask_v_nxv8i16_nxv8i16( %0, * %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, a2, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, a2, e16,m2,tu,mu ; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv8i16( %0, @@ -588,7 +588,7 @@ define 
@intrinsic_vlse_mask_v_nxv16i16_nxv16i16( %0, * %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, a2, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, a2, e16,m4,tu,mu ; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv16i16( %0, @@ -628,7 +628,7 @@ define @intrinsic_vlse_mask_v_nxv32i16_nxv32i16( %0, * %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, a2, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, a2, e16,m8,tu,mu ; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv32i16( %0, @@ -668,7 +668,7 @@ define @intrinsic_vlse_mask_v_nxv1f16_nxv1f16( %0, * %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, a2, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, a2, e16,mf4,tu,mu ; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv1f16( %0, @@ -708,7 +708,7 @@ define @intrinsic_vlse_mask_v_nxv2f16_nxv2f16( %0, * %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, a2, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, a2, e16,mf2,tu,mu ; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv2f16( %0, @@ -748,7 +748,7 @@ define @intrinsic_vlse_mask_v_nxv4f16_nxv4f16( %0, * %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, a2, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, a2, e16,m1,tu,mu ; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv4f16( %0, @@ -788,7 +788,7 @@ define @intrinsic_vlse_mask_v_nxv8f16_nxv8f16( %0, * %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, a2, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, a2, e16,m2,tu,mu ; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv8f16( %0, @@ -828,7 +828,7 @@ define @intrinsic_vlse_mask_v_nxv16f16_nxv16f16( %0, * %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, a2, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, a2, e16,m4,tu,mu ; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv16f16( %0, @@ -868,7 +868,7 @@ define @intrinsic_vlse_mask_v_nxv32f16_nxv32f16( %0, * %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32f16_nxv32f16 -; CHECK: vsetvli {{.*}}, a2, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, a2, e16,m8,tu,mu ; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv32f16( %0, @@ -908,7 +908,7 @@ define @intrinsic_vlse_mask_v_nxv1i8_nxv1i8( %0, * %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, a2, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, a2, e8,mf8,tu,mu ; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv1i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vlse_mask_v_nxv2i8_nxv2i8( %0, * %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, a2, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, a2, e8,mf4,tu,mu ; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv2i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vlse_mask_v_nxv4i8_nxv4i8( %0, * %1, i32 %2, %3, i32 %4) nounwind { entry: ; 
CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, a2, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, a2, e8,mf2,tu,mu ; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv4i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vlse_mask_v_nxv8i8_nxv8i8( %0, * %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, a2, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, a2, e8,m1,tu,mu ; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv8i8( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vlse_mask_v_nxv16i8_nxv16i8( %0, * %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, a2, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, a2, e8,m2,tu,mu ; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv16i8( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vlse_mask_v_nxv32i8_nxv32i8( %0, * %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, a2, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, a2, e8,m4,tu,mu ; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv32i8( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vlse_mask_v_nxv64i8_nxv64i8( %0, * %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, a2, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, a2, e8,m8,tu,mu ; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv64i8( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vlse_mask_v_nxv1i64_nxv1i64( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, a2, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, a2, e64,m1,tu,mu ; CHECK: vlse64.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv1i64( %0, @@ -68,7 +68,7 @@ define @intrinsic_vlse_mask_v_nxv2i64_nxv2i64( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, a2, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, a2, e64,m2,tu,mu ; CHECK: vlse64.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv2i64( %0, @@ -108,7 +108,7 @@ define @intrinsic_vlse_mask_v_nxv4i64_nxv4i64( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, a2, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, a2, e64,m4,tu,mu ; CHECK: vlse64.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv4i64( %0, @@ -148,7 +148,7 @@ define @intrinsic_vlse_mask_v_nxv8i64_nxv8i64( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, a2, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, a2, e64,m8,tu,mu ; CHECK: vlse64.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv8i64( %0, @@ -188,7 +188,7 @@ define @intrinsic_vlse_mask_v_nxv1f64_nxv1f64( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f64_nxv1f64 -; CHECK: vsetvli {{.*}}, a2, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, a2, e64,m1,tu,mu ; CHECK: vlse64.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv1f64( %0, @@ -228,7 +228,7 @@ define 
@intrinsic_vlse_mask_v_nxv2f64_nxv2f64( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f64_nxv2f64 -; CHECK: vsetvli {{.*}}, a2, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, a2, e64,m2,tu,mu ; CHECK: vlse64.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv2f64( %0, @@ -268,7 +268,7 @@ define @intrinsic_vlse_mask_v_nxv4f64_nxv4f64( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f64_nxv4f64 -; CHECK: vsetvli {{.*}}, a2, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, a2, e64,m4,tu,mu ; CHECK: vlse64.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv4f64( %0, @@ -308,7 +308,7 @@ define @intrinsic_vlse_mask_v_nxv8f64_nxv8f64( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f64_nxv8f64 -; CHECK: vsetvli {{.*}}, a2, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, a2, e64,m8,tu,mu ; CHECK: vlse64.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv8f64( %0, @@ -348,7 +348,7 @@ define @intrinsic_vlse_mask_v_nxv1i32_nxv1i32( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, a2, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, a2, e32,mf2,tu,mu ; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv1i32( %0, @@ -388,7 +388,7 @@ define @intrinsic_vlse_mask_v_nxv2i32_nxv2i32( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, a2, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, a2, e32,m1,tu,mu ; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv2i32( %0, @@ -428,7 +428,7 @@ define @intrinsic_vlse_mask_v_nxv4i32_nxv4i32( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, a2, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, a2, e32,m2,tu,mu ; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv4i32( %0, @@ -468,7 +468,7 @@ define @intrinsic_vlse_mask_v_nxv8i32_nxv8i32( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, a2, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, a2, e32,m4,tu,mu ; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv8i32( %0, @@ -508,7 +508,7 @@ define @intrinsic_vlse_mask_v_nxv16i32_nxv16i32( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, a2, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, a2, e32,m8,tu,mu ; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv16i32( %0, @@ -548,7 +548,7 @@ define @intrinsic_vlse_mask_v_nxv1f32_nxv1f32( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, a2, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, a2, e32,mf2,tu,mu ; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv1f32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vlse_mask_v_nxv2f32_nxv2f32( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, a2, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, a2, e32,m1,tu,mu ; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv2f32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vlse_mask_v_nxv4f32_nxv4f32( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; 
CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, a2, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, a2, e32,m2,tu,mu ; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv4f32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vlse_mask_v_nxv8f32_nxv8f32( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, a2, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, a2, e32,m4,tu,mu ; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv8f32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vlse_mask_v_nxv16f32_nxv16f32( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f32_nxv16f32 -; CHECK: vsetvli {{.*}}, a2, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, a2, e32,m8,tu,mu ; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv16f32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vlse_mask_v_nxv1i16_nxv1i16( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, a2, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, a2, e16,mf4,tu,mu ; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv1i16( %0, @@ -788,7 +788,7 @@ define @intrinsic_vlse_mask_v_nxv2i16_nxv2i16( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, a2, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, a2, e16,mf2,tu,mu ; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv2i16( %0, @@ -828,7 +828,7 @@ define @intrinsic_vlse_mask_v_nxv4i16_nxv4i16( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, a2, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, a2, e16,m1,tu,mu ; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv4i16( %0, @@ -868,7 +868,7 @@ define @intrinsic_vlse_mask_v_nxv8i16_nxv8i16( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, a2, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, a2, e16,m2,tu,mu ; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv8i16( %0, @@ -908,7 +908,7 @@ define @intrinsic_vlse_mask_v_nxv16i16_nxv16i16( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, a2, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, a2, e16,m4,tu,mu ; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv16i16( %0, @@ -948,7 +948,7 @@ define @intrinsic_vlse_mask_v_nxv32i16_nxv32i16( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, a2, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, a2, e16,m8,tu,mu ; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv32i16( %0, @@ -988,7 +988,7 @@ define @intrinsic_vlse_mask_v_nxv1f16_nxv1f16( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, a2, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, a2, e16,mf4,tu,mu ; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv1f16( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vlse_mask_v_nxv2f16_nxv2f16( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, a2, 
e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, a2, e16,mf2,tu,mu ; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv2f16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vlse_mask_v_nxv4f16_nxv4f16( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, a2, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, a2, e16,m1,tu,mu ; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv4f16( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vlse_mask_v_nxv8f16_nxv8f16( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, a2, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, a2, e16,m2,tu,mu ; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv8f16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vlse_mask_v_nxv16f16_nxv16f16( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, a2, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, a2, e16,m4,tu,mu ; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv16f16( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vlse_mask_v_nxv32f16_nxv32f16( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32f16_nxv32f16 -; CHECK: vsetvli {{.*}}, a2, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, a2, e16,m8,tu,mu ; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv32f16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vlse_mask_v_nxv1i8_nxv1i8( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, a2, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, a2, e8,mf8,tu,mu ; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv1i8( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vlse_mask_v_nxv2i8_nxv2i8( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, a2, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, a2, e8,mf4,tu,mu ; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv2i8( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vlse_mask_v_nxv4i8_nxv4i8( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, a2, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, a2, e8,mf2,tu,mu ; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv4i8( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vlse_mask_v_nxv8i8_nxv8i8( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, a2, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, a2, e8,m1,tu,mu ; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv8i8( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vlse_mask_v_nxv16i8_nxv16i8( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, a2, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, a2, e8,m2,tu,mu ; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv16i8( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vlse_mask_v_nxv32i8_nxv32i8( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, a2, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, a2, e8,m4,tu,mu ; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1, v0.t %a 
= call @llvm.riscv.vlse.mask.nxv32i8( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vlse_mask_v_nxv64i8_nxv64i8( %0, * %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, a2, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, a2, e8,m8,tu,mu ; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1, v0.t %a = call @llvm.riscv.vlse.mask.nxv64i8( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vlxe-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlxe-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlxe-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlxe-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1i8.nxv1i32( %0, @@ -68,7 +68,7 @@ define @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2i8.nxv2i32( %0, @@ -108,7 +108,7 @@ define @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4i8.nxv4i32( %0, @@ -148,7 +148,7 @@ define @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8i8.nxv8i32( %0, @@ -188,7 +188,7 @@ define @intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv16i8.nxv16i32( %0, @@ -228,7 +228,7 @@ define @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1i16.nxv1i32( %0, @@ -268,7 +268,7 @@ define @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2i16.nxv2i32( %0, @@ -308,7 +308,7 @@ define @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4i16.nxv4i32( %0, @@ -348,7 +348,7 @@ define @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8i16.nxv8i32( %0, @@ -388,7 +388,7 @@ define @intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv16i16.nxv16i32( %0, @@ -428,7 +428,7 @@ define @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1i32.nxv1i32( %0, @@ -468,7 +468,7 @@ define @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2i32.nxv2i32( %0, @@ -508,7 +508,7 @@ define @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4i32.nxv4i32( %0, @@ -548,7 +548,7 @@ define @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8i32.nxv8i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv16i32.nxv16i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1f16.nxv1i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2f16.nxv2i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4f16.nxv4i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8f16.nxv8i32( %0, @@ -788,7 +788,7 @@ define @intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv16f16.nxv16i32( %0, @@ -828,7 +828,7 @@ define @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1f32.nxv1i32( %0, @@ -868,7 +868,7 @@ define @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2f32.nxv2i32( %0, @@ -908,7 +908,7 @@ define @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4f32.nxv4i32( %0, @@ -948,7 +948,7 @@ define @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8f32.nxv8i32( %0, @@ -988,7 +988,7 @@ define @intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv16f32.nxv16i32( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1i8.nxv1i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2i8.nxv2i16( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4i8.nxv4i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8i8.nxv8i16( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv16i8.nxv16i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv32i8.nxv32i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1i16.nxv1i16( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2i16.nxv2i16( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4i16.nxv4i16( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8i16.nxv8i16( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv16i16.nxv16i16( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv32i16.nxv32i16( %0, @@ -1508,7 +1508,7 @@ define @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1i32.nxv1i16( %0, @@ -1548,7 +1548,7 @@ define @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2i32.nxv2i16( %0, @@ -1588,7 +1588,7 @@ define @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4i32.nxv4i16( %0, @@ -1628,7 +1628,7 @@ define @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8i32.nxv8i16( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv16i32.nxv16i16( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1f16.nxv1i16( %0, @@ -1748,7 +1748,7 @@ define @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i16 -; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2f16.nxv2i16( %0, @@ -1788,7 +1788,7 @@ define @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4f16.nxv4i16( %0, @@ -1828,7 +1828,7 @@ define @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8f16.nxv8i16( %0, @@ -1868,7 +1868,7 @@ define @intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv16f16.nxv16i16( %0, @@ -1908,7 +1908,7 @@ define @intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv32f16.nxv32i16( %0, @@ -1948,7 +1948,7 @@ define @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1f32.nxv1i16( %0, @@ -1988,7 +1988,7 @@ define @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2f32.nxv2i16( %0, @@ -2028,7 +2028,7 @@ define @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4f32.nxv4i16( %0, @@ -2068,7 +2068,7 @@ define @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8f32.nxv8i16( %0, @@ -2108,7 +2108,7 @@ define @intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i16 
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv16f32.nxv16i16( %0, @@ -2148,7 +2148,7 @@ define @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1i8.nxv1i8( %0, @@ -2188,7 +2188,7 @@ define @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2i8.nxv2i8( %0, @@ -2228,7 +2228,7 @@ define @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4i8.nxv4i8( %0, @@ -2268,7 +2268,7 @@ define @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8i8.nxv8i8( %0, @@ -2308,7 +2308,7 @@ define @intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv16i8.nxv16i8( %0, @@ -2348,7 +2348,7 @@ define @intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv32i8.nxv32i8( %0, @@ -2388,7 +2388,7 @@ define @intrinsic_vlxe_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv64i8.nxv64i8( %0, @@ -2428,7 +2428,7 @@ define @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1i16.nxv1i8( %0, @@ -2468,7 +2468,7 @@ define @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2i16.nxv2i8( %0, @@ -2508,7 +2508,7 @@ define @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4i16.nxv4i8( %0, @@ -2548,7 +2548,7 @@ define @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8i16.nxv8i8( %0, @@ -2588,7 +2588,7 @@ define @intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv16i16.nxv16i8( %0, @@ -2628,7 +2628,7 @@ define @intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv32i16.nxv32i8( %0, @@ -2668,7 +2668,7 @@ define @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1i32.nxv1i8( %0, @@ -2708,7 +2708,7 @@ define @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2i32.nxv2i8( %0, @@ -2748,7 +2748,7 @@ define @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4i32.nxv4i8( %0, @@ -2788,7 +2788,7 @@ define @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8i32.nxv8i8( %0, @@ -2828,7 +2828,7 @@ define @intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv16i32.nxv16i8( %0, @@ -2868,7 +2868,7 @@ define @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1f16.nxv1i8( %0, @@ -2908,7 +2908,7 @@ define @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2f16.nxv2i8( %0, @@ -2948,7 +2948,7 @@ define @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4f16.nxv4i8( %0, @@ -2988,7 +2988,7 @@ define @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8f16.nxv8i8( %0, @@ -3028,7 +3028,7 @@ define @intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv16f16.nxv16i8( %0, @@ -3068,7 +3068,7 @@ define @intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv32f16.nxv32i8( %0, @@ -3108,7 +3108,7 @@ define @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1f32.nxv1i8( %0, @@ -3148,7 +3148,7 @@ define @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2f32.nxv2i8( %0, @@ -3188,7 +3188,7 @@ define @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4f32.nxv4i8( %0, @@ -3228,7 +3228,7 @@ define @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8f32.nxv8i8( %0, @@ -3268,7 +3268,7 @@ define @intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv16f32.nxv16i8( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vlxe-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlxe-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlxe-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlxe-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1i8.nxv1i64( %0, @@ -68,7 +68,7 @@ define @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2i8.nxv2i64( %0, @@ -108,7 +108,7 @@ define @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4i8.nxv4i64( %0, @@ -148,7 +148,7 @@ define @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8i8.nxv8i64( %0, @@ -188,7 +188,7 @@ define @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1i16.nxv1i64( %0, @@ -228,7 +228,7 @@ define @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2i16.nxv2i64( %0, @@ -268,7 +268,7 @@ define 
@intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4i16.nxv4i64( %0, @@ -308,7 +308,7 @@ define @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8i16.nxv8i64( %0, @@ -348,7 +348,7 @@ define @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1i32.nxv1i64( %0, @@ -388,7 +388,7 @@ define @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2i32.nxv2i64( %0, @@ -428,7 +428,7 @@ define @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4i32.nxv4i64( %0, @@ -468,7 +468,7 @@ define @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8i32.nxv8i64( %0, @@ -508,7 +508,7 @@ define @intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1i64.nxv1i64( %0, @@ -548,7 +548,7 @@ define @intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2i64.nxv2i64( %0, @@ -588,7 +588,7 @@ define @intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4i64.nxv4i64( %0, @@ -628,7 +628,7 @@ define 
@intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8i64.nxv8i64( %0, @@ -668,7 +668,7 @@ define @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1f16.nxv1i64( %0, @@ -708,7 +708,7 @@ define @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2f16.nxv2i64( %0, @@ -748,7 +748,7 @@ define @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4f16.nxv4i64( %0, @@ -788,7 +788,7 @@ define @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8f16.nxv8i64( %0, @@ -828,7 +828,7 @@ define @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1f32.nxv1i64( %0, @@ -868,7 +868,7 @@ define @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2f32.nxv2i64( %0, @@ -908,7 +908,7 @@ define @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4f32.nxv4i64( %0, @@ -948,7 +948,7 @@ define @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8f32.nxv8i64( %0, @@ -988,7 +988,7 @@ define 
@intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1f64.nxv1i64( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2f64.nxv2i64( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4f64.nxv4i64( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8f64.nxv8i64( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1i8.nxv1i32( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2i8.nxv2i32( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4i8.nxv4i32( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8i8.nxv8i32( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv16i8.nxv16i32( %0, @@ -1348,7 +1348,7 @@ define 
@intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1i16.nxv1i32( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2i16.nxv2i32( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4i16.nxv4i32( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8i16.nxv8i32( %0, @@ -1508,7 +1508,7 @@ define @intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv16i16.nxv16i32( %0, @@ -1548,7 +1548,7 @@ define @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1i32.nxv1i32( %0, @@ -1588,7 +1588,7 @@ define @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2i32.nxv2i32( %0, @@ -1628,7 +1628,7 @@ define @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4i32.nxv4i32( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8i32.nxv8i32( %0, @@ -1708,7 
+1708,7 @@ define @intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv16i32.nxv16i32( %0, @@ -1748,7 +1748,7 @@ define @intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1i64.nxv1i32( %0, @@ -1788,7 +1788,7 @@ define @intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2i64.nxv2i32( %0, @@ -1828,7 +1828,7 @@ define @intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4i64.nxv4i32( %0, @@ -1868,7 +1868,7 @@ define @intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8i64.nxv8i32( %0, @@ -1908,7 +1908,7 @@ define @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1f16.nxv1i32( %0, @@ -1948,7 +1948,7 @@ define @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2f16.nxv2i32( %0, @@ -1988,7 +1988,7 @@ define @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4f16.nxv4i32( %0, @@ -2028,7 +2028,7 @@ define @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8f16.nxv8i32( 
%0, @@ -2068,7 +2068,7 @@ define @intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv16f16.nxv16i32( %0, @@ -2108,7 +2108,7 @@ define @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1f32.nxv1i32( %0, @@ -2148,7 +2148,7 @@ define @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2f32.nxv2i32( %0, @@ -2188,7 +2188,7 @@ define @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4f32.nxv4i32( %0, @@ -2228,7 +2228,7 @@ define @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8f32.nxv8i32( %0, @@ -2268,7 +2268,7 @@ define @intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv16f32.nxv16i32( %0, @@ -2308,7 +2308,7 @@ define @intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1f64.nxv1i32( %0, @@ -2348,7 +2348,7 @@ define @intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2f64.nxv2i32( %0, @@ -2388,7 +2388,7 @@ define @intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call 
@llvm.riscv.vlxe.mask.nxv4f64.nxv4i32( %0, @@ -2428,7 +2428,7 @@ define @intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8f64.nxv8i32( %0, @@ -2468,7 +2468,7 @@ define @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1i8.nxv1i16( %0, @@ -2508,7 +2508,7 @@ define @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2i8.nxv2i16( %0, @@ -2548,7 +2548,7 @@ define @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4i8.nxv4i16( %0, @@ -2588,7 +2588,7 @@ define @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8i8.nxv8i16( %0, @@ -2628,7 +2628,7 @@ define @intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv16i8.nxv16i16( %0, @@ -2668,7 +2668,7 @@ define @intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv32i8.nxv32i16( %0, @@ -2708,7 +2708,7 @@ define @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1i16.nxv1i16( %0, @@ -2748,7 +2748,7 @@ define @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call 
@llvm.riscv.vlxe.mask.nxv2i16.nxv2i16( %0, @@ -2788,7 +2788,7 @@ define @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4i16.nxv4i16( %0, @@ -2828,7 +2828,7 @@ define @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8i16.nxv8i16( %0, @@ -2868,7 +2868,7 @@ define @intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv16i16.nxv16i16( %0, @@ -2908,7 +2908,7 @@ define @intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv32i16.nxv32i16( %0, @@ -2948,7 +2948,7 @@ define @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1i32.nxv1i16( %0, @@ -2988,7 +2988,7 @@ define @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2i32.nxv2i16( %0, @@ -3028,7 +3028,7 @@ define @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4i32.nxv4i16( %0, @@ -3068,7 +3068,7 @@ define @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8i32.nxv8i16( %0, @@ -3108,7 +3108,7 @@ define @intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), 
{{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv16i32.nxv16i16( %0, @@ -3148,7 +3148,7 @@ define @intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1i64.nxv1i16( %0, @@ -3188,7 +3188,7 @@ define @intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2i64.nxv2i16( %0, @@ -3228,7 +3228,7 @@ define @intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4i64.nxv4i16( %0, @@ -3268,7 +3268,7 @@ define @intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8i64.nxv8i16( %0, @@ -3308,7 +3308,7 @@ define @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1f16.nxv1i16( %0, @@ -3348,7 +3348,7 @@ define @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2f16.nxv2i16( %0, @@ -3388,7 +3388,7 @@ define @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4f16.nxv4i16( %0, @@ -3428,7 +3428,7 @@ define @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8f16.nxv8i16( %0, @@ -3468,7 +3468,7 @@ define @intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vlxei16.v 
{{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv16f16.nxv16i16( %0, @@ -3508,7 +3508,7 @@ define @intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv32f16.nxv32i16( %0, @@ -3548,7 +3548,7 @@ define @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1f32.nxv1i16( %0, @@ -3588,7 +3588,7 @@ define @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2f32.nxv2i16( %0, @@ -3628,7 +3628,7 @@ define @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4f32.nxv4i16( %0, @@ -3668,7 +3668,7 @@ define @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8f32.nxv8i16( %0, @@ -3708,7 +3708,7 @@ define @intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv16f32.nxv16i16( %0, @@ -3748,7 +3748,7 @@ define @intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1f64.nxv1i16( %0, @@ -3788,7 +3788,7 @@ define @intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2f64.nxv2i16( %0, @@ -3828,7 +3828,7 @@ define @intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e64,m4,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4f64.nxv4i16( %0, @@ -3868,7 +3868,7 @@ define @intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8f64.nxv8i16( %0, @@ -3908,7 +3908,7 @@ define @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1i8.nxv1i8( %0, @@ -3948,7 +3948,7 @@ define @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2i8.nxv2i8( %0, @@ -3988,7 +3988,7 @@ define @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4i8.nxv4i8( %0, @@ -4028,7 +4028,7 @@ define @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8i8.nxv8i8( %0, @@ -4068,7 +4068,7 @@ define @intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv16i8.nxv16i8( %0, @@ -4108,7 +4108,7 @@ define @intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv32i8.nxv32i8( %0, @@ -4148,7 +4148,7 @@ define @intrinsic_vlxe_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv64i8.nxv64i8( %0, @@ -4188,7 +4188,7 @@ define @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, 
(a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1i16.nxv1i8( %0, @@ -4228,7 +4228,7 @@ define @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2i16.nxv2i8( %0, @@ -4268,7 +4268,7 @@ define @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4i16.nxv4i8( %0, @@ -4308,7 +4308,7 @@ define @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8i16.nxv8i8( %0, @@ -4348,7 +4348,7 @@ define @intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv16i16.nxv16i8( %0, @@ -4388,7 +4388,7 @@ define @intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv32i16.nxv32i8( %0, @@ -4428,7 +4428,7 @@ define @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1i32.nxv1i8( %0, @@ -4468,7 +4468,7 @@ define @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2i32.nxv2i8( %0, @@ -4508,7 +4508,7 @@ define @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4i32.nxv4i8( %0, @@ -4548,7 +4548,7 @@ define @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), 
{{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8i32.nxv8i8( %0, @@ -4588,7 +4588,7 @@ define @intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv16i32.nxv16i8( %0, @@ -4628,7 +4628,7 @@ define @intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1i64.nxv1i8( %0, @@ -4668,7 +4668,7 @@ define @intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2i64.nxv2i8( %0, @@ -4708,7 +4708,7 @@ define @intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4i64.nxv4i8( %0, @@ -4748,7 +4748,7 @@ define @intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8i64.nxv8i8( %0, @@ -4788,7 +4788,7 @@ define @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1f16.nxv1i8( %0, @@ -4828,7 +4828,7 @@ define @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2f16.nxv2i8( %0, @@ -4868,7 +4868,7 @@ define @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4f16.nxv4i8( %0, @@ -4908,7 +4908,7 @@ define @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = 
call @llvm.riscv.vlxe.mask.nxv8f16.nxv8i8( %0, @@ -4948,7 +4948,7 @@ define @intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv16f16.nxv16i8( %0, @@ -4988,7 +4988,7 @@ define @intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv32f16.nxv32i8( %0, @@ -5028,7 +5028,7 @@ define @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1f32.nxv1i8( %0, @@ -5068,7 +5068,7 @@ define @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv2f32.nxv2i8( %0, @@ -5108,7 +5108,7 @@ define @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4f32.nxv4i8( %0, @@ -5148,7 +5148,7 @@ define @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8f32.nxv8i8( %0, @@ -5188,7 +5188,7 @@ define @intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv16f32.nxv16i8( %0, @@ -5228,7 +5228,7 @@ define @intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv1f64.nxv1i8( %0, @@ -5268,7 +5268,7 @@ define @intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call 
@llvm.riscv.vlxe.mask.nxv2f64.nxv2i8( %0, @@ -5308,7 +5308,7 @@ define @intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv4f64.nxv4i8( %0, @@ -5348,7 +5348,7 @@ define @intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vlxe.mask.nxv8f64.nxv8i8( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll @@ -9,7 +9,7 @@ define @intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv1i8.nxv1i8( %0, @@ -30,7 +30,7 @@ define @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8( %0, @@ -51,7 +51,7 @@ define @intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv2i8.nxv2i8( %0, @@ -72,7 +72,7 @@ define @intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8( %0, @@ -93,7 +93,7 @@ define @intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv4i8.nxv4i8( %0, @@ -114,7 +114,7 @@ define @intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8( %0, @@ -135,7 +135,7 @@ define @intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e8,m1,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv8i8.nxv8i8( %0, @@ -156,7 +156,7 @@ define @intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8( %0, @@ -177,7 +177,7 @@ define @intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv16i8.nxv16i8( %0, @@ -198,7 +198,7 @@ define @intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8( %0, @@ -219,7 +219,7 @@ define @intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv32i8.nxv32i8( %0, @@ -240,7 +240,7 @@ define @intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8( %0, @@ -261,7 +261,7 @@ define @intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv1i16.nxv1i16( %0, @@ -282,7 +282,7 @@ define @intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16( %0, @@ -303,7 +303,7 @@ define @intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv2i16.nxv2i16( %0, @@ -324,7 +324,7 @@ define @intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, 
{{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16( %0, @@ -345,7 +345,7 @@ define @intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv4i16.nxv4i16( %0, @@ -366,7 +366,7 @@ define @intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16( %0, @@ -387,7 +387,7 @@ define @intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv8i16.nxv8i16( %0, @@ -408,7 +408,7 @@ define @intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16( %0, @@ -429,7 +429,7 @@ define @intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv16i16.nxv16i16( %0, @@ -450,7 +450,7 @@ define @intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16( %0, @@ -471,7 +471,7 @@ define @intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv1i32.nxv1i32( %0, @@ -492,7 +492,7 @@ define @intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32( %0, @@ -513,7 +513,7 @@ define @intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call 
@llvm.riscv.vmacc.nxv2i32.nxv2i32( %0, @@ -534,7 +534,7 @@ define @intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32( %0, @@ -555,7 +555,7 @@ define @intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv4i32.nxv4i32( %0, @@ -576,7 +576,7 @@ define @intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32( %0, @@ -597,7 +597,7 @@ define @intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv8i32.nxv8i32( %0, @@ -618,7 +618,7 @@ define @intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32( %0, @@ -639,7 +639,7 @@ define @intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv1i8.i8( %0, @@ -660,7 +660,7 @@ define @intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv1i8.i8( %0, @@ -681,7 +681,7 @@ define @intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv2i8.i8( %0, @@ -702,7 +702,7 @@ define @intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv2i8.i8( %0, @@ -723,7 +723,7 @@ define @intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, i32 %3) nounwind { 
entry: ; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv4i8.i8( %0, @@ -744,7 +744,7 @@ define @intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv4i8.i8( %0, @@ -765,7 +765,7 @@ define @intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv8i8.i8( %0, @@ -786,7 +786,7 @@ define @intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv8i8.i8( %0, @@ -807,7 +807,7 @@ define @intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv16i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv16i8.i8( %0, @@ -849,7 +849,7 @@ define @intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv32i8.i8( %0, @@ -870,7 +870,7 @@ define @intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv32i8.i8( %0, @@ -891,7 +891,7 @@ define @intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv1i16.i16( %0, @@ -912,7 +912,7 @@ define @intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call 
@llvm.riscv.vmacc.mask.nxv1i16.i16( %0, @@ -933,7 +933,7 @@ define @intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv2i16.i16( %0, @@ -954,7 +954,7 @@ define @intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv2i16.i16( %0, @@ -975,7 +975,7 @@ define @intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv4i16.i16( %0, @@ -996,7 +996,7 @@ define @intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv4i16.i16( %0, @@ -1017,7 +1017,7 @@ define @intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv8i16.i16( %0, @@ -1038,7 +1038,7 @@ define @intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv8i16.i16( %0, @@ -1059,7 +1059,7 @@ define @intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv16i16.i16( %0, @@ -1080,7 +1080,7 @@ define @intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv16i16.i16( %0, @@ -1101,7 +1101,7 @@ define @intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv1i32.i32( %0, @@ -1122,7 +1122,7 @@ define @intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv1i32.i32( %0, @@ -1143,7 +1143,7 @@ define @intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv2i32.i32( %0, @@ -1164,7 +1164,7 @@ define @intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv2i32.i32( %0, @@ -1185,7 +1185,7 @@ define @intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv4i32.i32( %0, @@ -1206,7 +1206,7 @@ define @intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv4i32.i32( %0, @@ -1227,7 +1227,7 @@ define @intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv8i32.i32( %0, @@ -1248,7 +1248,7 @@ define @intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv8i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll @@ -9,7 +9,7 @@ define @intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv1i8.nxv1i8( %0, @@ -30,7 +30,7 @@ define @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8( %0, @@ -51,7 +51,7 @@ define @intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind { 
entry: ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv2i8.nxv2i8( %0, @@ -72,7 +72,7 @@ define @intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8( %0, @@ -93,7 +93,7 @@ define @intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv4i8.nxv4i8( %0, @@ -114,7 +114,7 @@ define @intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8( %0, @@ -135,7 +135,7 @@ define @intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv8i8.nxv8i8( %0, @@ -156,7 +156,7 @@ define @intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8( %0, @@ -177,7 +177,7 @@ define @intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv16i8.nxv16i8( %0, @@ -198,7 +198,7 @@ define @intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8( %0, @@ -219,7 +219,7 @@ define @intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv32i8.nxv32i8( %0, @@ -240,7 +240,7 @@ define @intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8( %0, @@ -261,7 +261,7 @@ define @intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv1i16.nxv1i16( %0, @@ -282,7 +282,7 @@ define @intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16( %0, @@ -303,7 +303,7 @@ define @intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv2i16.nxv2i16( %0, @@ -324,7 +324,7 @@ define @intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16( %0, @@ -345,7 +345,7 @@ define @intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv4i16.nxv4i16( %0, @@ -366,7 +366,7 @@ define @intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16( %0, @@ -387,7 +387,7 @@ define @intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv8i16.nxv8i16( %0, @@ -408,7 +408,7 @@ define @intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16( %0, @@ -429,7 +429,7 @@ define @intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv16i16.nxv16i16( %0, @@ -450,7 +450,7 @@ define @intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16( %0, @@ -471,7 +471,7 @@ define @intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv1i32.nxv1i32( %0, @@ -492,7 +492,7 @@ define @intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32( %0, @@ -513,7 +513,7 @@ define @intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv2i32.nxv2i32( %0, @@ -534,7 +534,7 @@ define @intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32( %0, @@ -555,7 +555,7 @@ define @intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv4i32.nxv4i32( %0, @@ -576,7 +576,7 @@ define @intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32( %0, @@ -597,7 +597,7 @@ define @intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv8i32.nxv8i32( %0, @@ -618,7 +618,7 @@ define @intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; 
CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32( %0, @@ -639,7 +639,7 @@ define @intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv1i64.nxv1i64( %0, @@ -660,7 +660,7 @@ define @intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64( %0, @@ -681,7 +681,7 @@ define @intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv2i64.nxv2i64( %0, @@ -702,7 +702,7 @@ define @intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64( %0, @@ -723,7 +723,7 @@ define @intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv4i64.nxv4i64( %0, @@ -744,7 +744,7 @@ define @intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64( %0, @@ -765,7 +765,7 @@ define @intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv1i8.i8( %0, @@ -786,7 +786,7 @@ define @intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv1i8.i8( %0, @@ -807,7 +807,7 @@ define @intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv2i8.i8( %0, @@ -828,7 +828,7 @@ 
define @intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv2i8.i8( %0, @@ -849,7 +849,7 @@ define @intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv4i8.i8( %0, @@ -870,7 +870,7 @@ define @intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv4i8.i8( %0, @@ -891,7 +891,7 @@ define @intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv8i8.i8( %0, @@ -912,7 +912,7 @@ define @intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv8i8.i8( %0, @@ -933,7 +933,7 @@ define @intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv16i8.i8( %0, @@ -954,7 +954,7 @@ define @intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv16i8.i8( %0, @@ -975,7 +975,7 @@ define @intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv32i8.i8( %0, @@ -996,7 +996,7 @@ define @intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv32i8.i8( %0, @@ -1017,7 +1017,7 @@ define @intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: 
vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv1i16.i16( %0, @@ -1038,7 +1038,7 @@ define @intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv1i16.i16( %0, @@ -1059,7 +1059,7 @@ define @intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv2i16.i16( %0, @@ -1080,7 +1080,7 @@ define @intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv2i16.i16( %0, @@ -1101,7 +1101,7 @@ define @intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv4i16.i16( %0, @@ -1122,7 +1122,7 @@ define @intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv4i16.i16( %0, @@ -1143,7 +1143,7 @@ define @intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv8i16.i16( %0, @@ -1164,7 +1164,7 @@ define @intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv8i16.i16( %0, @@ -1185,7 +1185,7 @@ define @intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv16i16.i16( %0, @@ -1206,7 +1206,7 @@ define @intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv16i16.i16( %0, @@ -1227,7 +1227,7 @@ define @intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32( 
%0, i32 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv1i32.i32( %0, @@ -1248,7 +1248,7 @@ define @intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv1i32.i32( %0, @@ -1269,7 +1269,7 @@ define @intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv2i32.i32( %0, @@ -1290,7 +1290,7 @@ define @intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv2i32.i32( %0, @@ -1311,7 +1311,7 @@ define @intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv4i32.i32( %0, @@ -1332,7 +1332,7 @@ define @intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv4i32.i32( %0, @@ -1353,7 +1353,7 @@ define @intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv8i32.i32( %0, @@ -1374,7 +1374,7 @@ define @intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv8i32.i32( %0, @@ -1395,7 +1395,7 @@ define @intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv1i64.i64( %0, @@ -1416,7 +1416,7 @@ define @intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv1i64.i64( %0, @@ -1437,7 +1437,7 @@ define @intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv2i64.i64( %0, @@ -1458,7 +1458,7 @@ define @intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv2i64.i64( %0, @@ -1479,7 +1479,7 @@ define @intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmacc.nxv4i64.i64( %0, @@ -1500,7 +1500,7 @@ define @intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmacc.mask.nxv4i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll @@ -9,7 +9,7 @@ define @intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv1i8.nxv1i8( %0, @@ -30,7 +30,7 @@ define @intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8( %0, @@ -51,7 +51,7 @@ define @intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv2i8.nxv2i8( %0, @@ -72,7 +72,7 @@ define @intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8( %0, @@ -93,7 +93,7 @@ define @intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv4i8.nxv4i8( %0, @@ -114,7 +114,7 @@ define @intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8( %0, @@ -135,7 +135,7 @@ define @intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv8i8.nxv8i8( %0, @@ -156,7 +156,7 @@ define @intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8( %0, @@ -177,7 +177,7 @@ define @intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv16i8.nxv16i8( %0, @@ -198,7 +198,7 @@ define @intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8( %0, @@ -219,7 +219,7 @@ define @intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv32i8.nxv32i8( %0, @@ -240,7 +240,7 @@ define @intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8( %0, @@ -261,7 +261,7 @@ define @intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv1i16.nxv1i16( %0, @@ -282,7 +282,7 @@ define @intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, 
{{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16( %0, @@ -303,7 +303,7 @@ define @intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv2i16.nxv2i16( %0, @@ -324,7 +324,7 @@ define @intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16( %0, @@ -345,7 +345,7 @@ define @intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv4i16.nxv4i16( %0, @@ -366,7 +366,7 @@ define @intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16( %0, @@ -387,7 +387,7 @@ define @intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv8i16.nxv8i16( %0, @@ -408,7 +408,7 @@ define @intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16( %0, @@ -429,7 +429,7 @@ define @intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv16i16.nxv16i16( %0, @@ -450,7 +450,7 @@ define @intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16( %0, @@ -471,7 +471,7 @@ define @intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a 
= call @llvm.riscv.vmadd.nxv1i32.nxv1i32( %0, @@ -492,7 +492,7 @@ define @intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32( %0, @@ -513,7 +513,7 @@ define @intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv2i32.nxv2i32( %0, @@ -534,7 +534,7 @@ define @intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32( %0, @@ -555,7 +555,7 @@ define @intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv4i32.nxv4i32( %0, @@ -576,7 +576,7 @@ define @intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32( %0, @@ -597,7 +597,7 @@ define @intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv8i32.nxv8i32( %0, @@ -618,7 +618,7 @@ define @intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32( %0, @@ -639,7 +639,7 @@ define @intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv1i8.i8( %0, @@ -660,7 +660,7 @@ define @intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv1i8.i8( %0, @@ -681,7 +681,7 @@ define 
@intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv2i8.i8( %0, @@ -702,7 +702,7 @@ define @intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv2i8.i8( %0, @@ -723,7 +723,7 @@ define @intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv4i8.i8( %0, @@ -744,7 +744,7 @@ define @intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv4i8.i8( %0, @@ -765,7 +765,7 @@ define @intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv8i8.i8( %0, @@ -786,7 +786,7 @@ define @intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv8i8.i8( %0, @@ -807,7 +807,7 @@ define @intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv16i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv16i8.i8( %0, @@ -849,7 +849,7 @@ define @intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv32i8.i8( %0, @@ -870,7 +870,7 @@ define @intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, 
{{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv32i8.i8( %0, @@ -891,7 +891,7 @@ define @intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv1i16.i16( %0, @@ -912,7 +912,7 @@ define @intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv1i16.i16( %0, @@ -933,7 +933,7 @@ define @intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv2i16.i16( %0, @@ -954,7 +954,7 @@ define @intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv2i16.i16( %0, @@ -975,7 +975,7 @@ define @intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv4i16.i16( %0, @@ -996,7 +996,7 @@ define @intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv4i16.i16( %0, @@ -1017,7 +1017,7 @@ define @intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv8i16.i16( %0, @@ -1038,7 +1038,7 @@ define @intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv8i16.i16( %0, @@ -1059,7 +1059,7 @@ define @intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv16i16.i16( %0, @@ -1080,7 +1080,7 @@ define @intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, i32 %4) nounwind { entry: ; 
CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv16i16.i16( %0, @@ -1101,7 +1101,7 @@ define @intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv1i32.i32( %0, @@ -1122,7 +1122,7 @@ define @intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv1i32.i32( %0, @@ -1143,7 +1143,7 @@ define @intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv2i32.i32( %0, @@ -1164,7 +1164,7 @@ define @intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv2i32.i32( %0, @@ -1185,7 +1185,7 @@ define @intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv4i32.i32( %0, @@ -1206,7 +1206,7 @@ define @intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv4i32.i32( %0, @@ -1227,7 +1227,7 @@ define @intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv8i32.i32( %0, @@ -1248,7 +1248,7 @@ define @intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv8i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll @@ -9,7 +9,7 @@ define @intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) 
nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv1i8.nxv1i8( %0, @@ -30,7 +30,7 @@ define @intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8( %0, @@ -51,7 +51,7 @@ define @intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv2i8.nxv2i8( %0, @@ -72,7 +72,7 @@ define @intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8( %0, @@ -93,7 +93,7 @@ define @intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv4i8.nxv4i8( %0, @@ -114,7 +114,7 @@ define @intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8( %0, @@ -135,7 +135,7 @@ define @intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv8i8.nxv8i8( %0, @@ -156,7 +156,7 @@ define @intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8( %0, @@ -177,7 +177,7 @@ define @intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv16i8.nxv16i8( %0, @@ -198,7 +198,7 @@ define @intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8( %0, @@ -219,7 +219,7 @@ define @intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv32i8.nxv32i8( %0, @@ -240,7 +240,7 @@ define @intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8( %0, @@ -261,7 +261,7 @@ define @intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv1i16.nxv1i16( %0, @@ -282,7 +282,7 @@ define @intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16( %0, @@ -303,7 +303,7 @@ define @intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv2i16.nxv2i16( %0, @@ -324,7 +324,7 @@ define @intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16( %0, @@ -345,7 +345,7 @@ define @intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv4i16.nxv4i16( %0, @@ -366,7 +366,7 @@ define @intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16( %0, @@ -387,7 +387,7 @@ define @intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv8i16.nxv8i16( %0, @@ -408,7 +408,7 @@ define @intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16( %0, @@ -429,7 +429,7 @@ define @intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv16i16.nxv16i16( %0, @@ -450,7 +450,7 @@ define @intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16( %0, @@ -471,7 +471,7 @@ define @intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv1i32.nxv1i32( %0, @@ -492,7 +492,7 @@ define @intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32( %0, @@ -513,7 +513,7 @@ define @intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv2i32.nxv2i32( %0, @@ -534,7 +534,7 @@ define @intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32( %0, @@ -555,7 +555,7 @@ define @intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv4i32.nxv4i32( %0, @@ -576,7 +576,7 @@ define @intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; 
CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32( %0, @@ -597,7 +597,7 @@ define @intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv8i32.nxv8i32( %0, @@ -618,7 +618,7 @@ define @intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32( %0, @@ -639,7 +639,7 @@ define @intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv1i64.nxv1i64( %0, @@ -660,7 +660,7 @@ define @intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64( %0, @@ -681,7 +681,7 @@ define @intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv2i64.nxv2i64( %0, @@ -702,7 +702,7 @@ define @intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64( %0, @@ -723,7 +723,7 @@ define @intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv4i64.nxv4i64( %0, @@ -744,7 +744,7 @@ define @intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64( %0, @@ -765,7 +765,7 @@ define @intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call 
@llvm.riscv.vmadd.nxv1i8.i8( %0, @@ -786,7 +786,7 @@ define @intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv1i8.i8( %0, @@ -807,7 +807,7 @@ define @intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv2i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv2i8.i8( %0, @@ -849,7 +849,7 @@ define @intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv4i8.i8( %0, @@ -870,7 +870,7 @@ define @intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv4i8.i8( %0, @@ -891,7 +891,7 @@ define @intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv8i8.i8( %0, @@ -912,7 +912,7 @@ define @intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv8i8.i8( %0, @@ -933,7 +933,7 @@ define @intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv16i8.i8( %0, @@ -954,7 +954,7 @@ define @intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv16i8.i8( %0, @@ -975,7 +975,7 @@ define @intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv32i8.i8( %0, @@ -996,7 +996,7 @@ define @intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv32i8.i8( %0, @@ -1017,7 +1017,7 @@ define @intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv1i16.i16( %0, @@ -1038,7 +1038,7 @@ define @intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv1i16.i16( %0, @@ -1059,7 +1059,7 @@ define @intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv2i16.i16( %0, @@ -1080,7 +1080,7 @@ define @intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv2i16.i16( %0, @@ -1101,7 +1101,7 @@ define @intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv4i16.i16( %0, @@ -1122,7 +1122,7 @@ define @intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv4i16.i16( %0, @@ -1143,7 +1143,7 @@ define @intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv8i16.i16( %0, @@ -1164,7 +1164,7 @@ define @intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv8i16.i16( %0, @@ -1185,7 +1185,7 @@ define 
@intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv16i16.i16( %0, @@ -1206,7 +1206,7 @@ define @intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv16i16.i16( %0, @@ -1227,7 +1227,7 @@ define @intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv1i32.i32( %0, @@ -1248,7 +1248,7 @@ define @intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv1i32.i32( %0, @@ -1269,7 +1269,7 @@ define @intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv2i32.i32( %0, @@ -1290,7 +1290,7 @@ define @intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv2i32.i32( %0, @@ -1311,7 +1311,7 @@ define @intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv4i32.i32( %0, @@ -1332,7 +1332,7 @@ define @intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv4i32.i32( %0, @@ -1353,7 +1353,7 @@ define @intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv8i32.i32( %0, @@ -1374,7 +1374,7 @@ define @intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32 -; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv8i32.i32( %0, @@ -1395,7 +1395,7 @@ define @intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv1i64.i64( %0, @@ -1416,7 +1416,7 @@ define @intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv1i64.i64( %0, @@ -1437,7 +1437,7 @@ define @intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv2i64.i64( %0, @@ -1458,7 +1458,7 @@ define @intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv2i64.i64( %0, @@ -1479,7 +1479,7 @@ define @intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vmadd.nxv4i64.i64( %0, @@ -1500,7 +1500,7 @@ define @intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmadd.mask.nxv4i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vmax_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vmax_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vmax_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vmax_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vmax_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vmax_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vmax_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vmax_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vmax_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vmax_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vmax_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vmax_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; 
CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vmax_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vmax_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vmax_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vmax_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vmax_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv1i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vmax_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv2i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vmax_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) 
nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv4i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vmax_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv8i8.i8( %0, @@ -908,7 +908,7 @@ define @intrinsic_vmax_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv16i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vmax_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv32i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vmax_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv64i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vmax_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv1i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vmax_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv2i16.i16( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vmax_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv4i16.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vmax_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv8i16.i16( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vmax_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vmax_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv16i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vmax_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv32i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vmax_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv1i32.i32( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vmax_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv2i32.i32( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vmax_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv4i32.i32( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vmax_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv8i32.i32( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vmax_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv16i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vmax_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vmax_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call 
@llvm.riscv.vmax.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vmax_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vmax_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vmax_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vmax_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vmax_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vmax_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vmax_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vmax_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = 
call @llvm.riscv.vmax.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vmax_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vmax_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vmax_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vmax_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vmax_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vmax_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv1i64.nxv1i64( %0, @@ -788,7 +788,7 @@ define @intrinsic_vmax_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: 
vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv2i64.nxv2i64( %0, @@ -828,7 +828,7 @@ define @intrinsic_vmax_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv4i64.nxv4i64( %0, @@ -868,7 +868,7 @@ define @intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv8i64.nxv8i64( %0, @@ -908,7 +908,7 @@ define @intrinsic_vmax_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv1i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vmax_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv2i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vmax_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv4i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vmax_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv8i8.i8( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vmax_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv16i8.i8( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vmax_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv32i8.i8( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vmax_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call 
@llvm.riscv.vmax.mask.nxv64i8.i8( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vmax_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv1i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vmax_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv2i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vmax_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv4i16.i16( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vmax_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv8i16.i16( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vmax_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv16i16.i16( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vmax_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv32i16.i16( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vmax_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv1i32.i32( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vmax_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv2i32.i32( %0, @@ -1508,7 +1508,7 @@ define @intrinsic_vmax_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call 
@llvm.riscv.vmax.mask.nxv4i32.i32( %0, @@ -1548,7 +1548,7 @@ define @intrinsic_vmax_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv8i32.i32( %0, @@ -1588,7 +1588,7 @@ define @intrinsic_vmax_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv16i32.i32( %0, @@ -1628,7 +1628,7 @@ define @intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv1i64.i64( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv2i64.i64( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv4i64.i64( %0, @@ -1748,7 +1748,7 @@ define @intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmax.mask.nxv8i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vmaxu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vmaxu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vmaxu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vmaxu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vmaxu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vmaxu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vmaxu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vmaxu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vmaxu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vmaxu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vmaxu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; 
CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vmaxu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vmaxu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vmaxu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vmaxu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vmaxu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv1i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vmaxu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv2i8.i8( %0, @@ -828,7 +828,7 @@ define 
@intrinsic_vmaxu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv4i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vmaxu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv8i8.i8( %0, @@ -908,7 +908,7 @@ define @intrinsic_vmaxu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv16i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vmaxu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv32i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vmaxu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv64i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vmaxu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv1i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vmaxu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv2i16.i16( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vmaxu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv4i16.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vmaxu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv8i16.i16( %0, @@ -1188,7 +1188,7 @@ define 
@intrinsic_vmaxu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv16i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vmaxu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv32i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vmaxu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv1i32.i32( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vmaxu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv2i32.i32( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vmaxu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv4i32.i32( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vmaxu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv8i32.i32( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vmaxu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv16i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vmaxu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vmaxu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vmaxu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vmaxu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vmaxu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vmaxu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vmaxu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vmaxu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vmaxu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vmaxu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vmaxu_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vmaxu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vmaxu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vmaxu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vmaxu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vmaxu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vmaxu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64( %0, @@ -788,7 +788,7 @@ define 
@intrinsic_vmaxu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64( %0, @@ -828,7 +828,7 @@ define @intrinsic_vmaxu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64( %0, @@ -868,7 +868,7 @@ define @intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64( %0, @@ -908,7 +908,7 @@ define @intrinsic_vmaxu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv1i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vmaxu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv2i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vmaxu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv4i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vmaxu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv8i8.i8( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vmaxu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv16i8.i8( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vmaxu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv32i8.i8( %0, @@ -1148,7 +1148,7 @@ define 
@intrinsic_vmaxu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv64i8.i8( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vmaxu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv1i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vmaxu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv2i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vmaxu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv4i16.i16( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vmaxu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv8i16.i16( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vmaxu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv16i16.i16( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vmaxu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv32i16.i16( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vmaxu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv1i32.i32( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vmaxu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv2i32.i32( %0, @@ 
-1508,7 +1508,7 @@ define @intrinsic_vmaxu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv4i32.i32( %0, @@ -1548,7 +1548,7 @@ define @intrinsic_vmaxu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv8i32.i32( %0, @@ -1588,7 +1588,7 @@ define @intrinsic_vmaxu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv16i32.i32( %0, @@ -1628,7 +1628,7 @@ define @intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv1i64.i64( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv2i64.i64( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv4i64.i64( %0, @@ -1748,7 +1748,7 @@ define @intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmaxu.mask.nxv8i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfeq.nxv1f16( %1, @@ -72,7 +72,7 @@ define @intrinsic_vmfeq_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfeq.nxv2f16( %1, @@ -116,7 +116,7 @@ define @intrinsic_vmfeq_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfeq.nxv4f16( %1, @@ -160,7 +160,7 @@ define @intrinsic_vmfeq_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfeq.nxv8f16( %1, @@ -204,7 +204,7 @@ define @intrinsic_vmfeq_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfeq.nxv16f16( %1, @@ -248,7 +248,7 @@ define @intrinsic_vmfeq_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfeq.nxv1f32( %1, @@ -292,7 +292,7 @@ define @intrinsic_vmfeq_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfeq.nxv2f32( %1, @@ -336,7 +336,7 @@ define @intrinsic_vmfeq_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfeq.nxv4f32( %1, @@ -380,7 +380,7 @@ define @intrinsic_vmfeq_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfeq.nxv8f32( %1, @@ -424,7 +424,7 @@ define @intrinsic_vmfeq_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfeq.mask.nxv1f16.f16( %0, @@ -464,7 +464,7 @@ define @intrinsic_vmfeq_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call 
@llvm.riscv.vmfeq.mask.nxv2f16.f16( %0, @@ -504,7 +504,7 @@ define @intrinsic_vmfeq_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfeq.mask.nxv4f16.f16( %0, @@ -544,7 +544,7 @@ define @intrinsic_vmfeq_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfeq.mask.nxv8f16.f16( %0, @@ -584,7 +584,7 @@ define @intrinsic_vmfeq_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfeq.mask.nxv16f16.f16( %0, @@ -624,7 +624,7 @@ define @intrinsic_vmfeq_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfeq.mask.nxv1f32.f32( %0, @@ -664,7 +664,7 @@ define @intrinsic_vmfeq_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfeq.mask.nxv2f32.f32( %0, @@ -704,7 +704,7 @@ define @intrinsic_vmfeq_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfeq.mask.nxv4f32.f32( %0, @@ -744,7 +744,7 @@ define @intrinsic_vmfeq_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfeq.mask.nxv8f32.f32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfeq.nxv1f16( %1, @@ -72,7 +72,7 @@ define @intrinsic_vmfeq_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; 
CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfeq.nxv2f16( %1, @@ -116,7 +116,7 @@ define @intrinsic_vmfeq_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfeq.nxv4f16( %1, @@ -160,7 +160,7 @@ define @intrinsic_vmfeq_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfeq.nxv8f16( %1, @@ -204,7 +204,7 @@ define @intrinsic_vmfeq_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfeq.nxv16f16( %1, @@ -248,7 +248,7 @@ define @intrinsic_vmfeq_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfeq.nxv1f32( %1, @@ -292,7 +292,7 @@ define @intrinsic_vmfeq_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfeq.nxv2f32( %1, @@ -336,7 +336,7 @@ define @intrinsic_vmfeq_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfeq.nxv4f32( %1, @@ -380,7 +380,7 @@ define @intrinsic_vmfeq_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfeq.nxv8f32( %1, @@ -424,7 +424,7 @@ define @intrinsic_vmfeq_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f64_nxv1f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfeq.nxv1f64( %1, @@ -468,7 +468,7 @@ define @intrinsic_vmfeq_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f64_nxv2f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfeq.nxv2f64( %1, @@ -512,7 +512,7 @@ define 
@intrinsic_vmfeq_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f64_nxv4f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfeq.nxv4f64( %1, @@ -556,7 +556,7 @@ define @intrinsic_vmfeq_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfeq.mask.nxv1f16.f16( %0, @@ -596,7 +596,7 @@ define @intrinsic_vmfeq_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfeq.mask.nxv2f16.f16( %0, @@ -636,7 +636,7 @@ define @intrinsic_vmfeq_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfeq.mask.nxv4f16.f16( %0, @@ -676,7 +676,7 @@ define @intrinsic_vmfeq_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfeq.mask.nxv8f16.f16( %0, @@ -716,7 +716,7 @@ define @intrinsic_vmfeq_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfeq.mask.nxv16f16.f16( %0, @@ -756,7 +756,7 @@ define @intrinsic_vmfeq_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfeq.mask.nxv1f32.f32( %0, @@ -796,7 +796,7 @@ define @intrinsic_vmfeq_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfeq.mask.nxv2f32.f32( %0, @@ -836,7 +836,7 @@ define @intrinsic_vmfeq_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfeq.mask.nxv4f32.f32( %0, @@ -876,7 +876,7 @@ define @intrinsic_vmfeq_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vmfeq_mask_vf_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfeq.mask.nxv8f32.f32( %0, @@ -916,7 +916,7 @@ define @intrinsic_vmfeq_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfeq.mask.nxv1f64.f64( %0, @@ -956,7 +956,7 @@ define @intrinsic_vmfeq_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfeq.mask.nxv2f64.f64( %0, @@ -996,7 +996,7 @@ define @intrinsic_vmfeq_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfeq.mask.nxv4f64.f64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vmfge_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfge.mask.nxv1f16.f16( %0, @@ -68,7 +68,7 @@ define @intrinsic_vmfge_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfge.mask.nxv2f16.f16( %0, @@ -108,7 +108,7 @@ define @intrinsic_vmfge_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfge.mask.nxv4f16.f16( %0, @@ -148,7 +148,7 @@ define @intrinsic_vmfge_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfge.mask.nxv8f16.f16( %0, @@ -188,7 +188,7 @@ define @intrinsic_vmfge_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfge.mask.nxv16f16.f16( %0, @@ -228,7 +228,7 @@ define 
@intrinsic_vmfge_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfge.mask.nxv1f32.f32( %0, @@ -268,7 +268,7 @@ define @intrinsic_vmfge_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfge.mask.nxv2f32.f32( %0, @@ -308,7 +308,7 @@ define @intrinsic_vmfge_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfge.mask.nxv4f32.f32( %0, @@ -348,7 +348,7 @@ define @intrinsic_vmfge_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfge.mask.nxv8f32.f32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfge-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfge-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfge-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfge-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vmfge_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfge.mask.nxv1f16.f16( %0, @@ -68,7 +68,7 @@ define @intrinsic_vmfge_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfge.mask.nxv2f16.f16( %0, @@ -108,7 +108,7 @@ define @intrinsic_vmfge_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfge.mask.nxv4f16.f16( %0, @@ -148,7 +148,7 @@ define @intrinsic_vmfge_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfge.mask.nxv8f16.f16( %0, @@ -188,7 +188,7 @@ define @intrinsic_vmfge_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = 
call @llvm.riscv.vmfge.mask.nxv16f16.f16( %0, @@ -228,7 +228,7 @@ define @intrinsic_vmfge_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfge.mask.nxv1f32.f32( %0, @@ -268,7 +268,7 @@ define @intrinsic_vmfge_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfge.mask.nxv2f32.f32( %0, @@ -308,7 +308,7 @@ define @intrinsic_vmfge_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfge.mask.nxv4f32.f32( %0, @@ -348,7 +348,7 @@ define @intrinsic_vmfge_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfge.mask.nxv8f32.f32( %0, @@ -388,7 +388,7 @@ define @intrinsic_vmfge_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfge.mask.nxv1f64.f64( %0, @@ -428,7 +428,7 @@ define @intrinsic_vmfge_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfge.mask.nxv2f64.f64( %0, @@ -468,7 +468,7 @@ define @intrinsic_vmfge_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfge.mask.nxv4f64.f64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vmfgt_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfgt.mask.nxv1f16.f16( %0, @@ -68,7 +68,7 @@ define @intrinsic_vmfgt_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e16,mf2,tu,mu ; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfgt.mask.nxv2f16.f16( %0, @@ -108,7 +108,7 @@ define @intrinsic_vmfgt_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfgt.mask.nxv4f16.f16( %0, @@ -148,7 +148,7 @@ define @intrinsic_vmfgt_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfgt.mask.nxv8f16.f16( %0, @@ -188,7 +188,7 @@ define @intrinsic_vmfgt_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfgt.mask.nxv16f16.f16( %0, @@ -228,7 +228,7 @@ define @intrinsic_vmfgt_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfgt.mask.nxv1f32.f32( %0, @@ -268,7 +268,7 @@ define @intrinsic_vmfgt_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfgt.mask.nxv2f32.f32( %0, @@ -308,7 +308,7 @@ define @intrinsic_vmfgt_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfgt.mask.nxv4f32.f32( %0, @@ -348,7 +348,7 @@ define @intrinsic_vmfgt_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfgt.mask.nxv8f32.f32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vmfgt_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfgt.mask.nxv1f16.f16( %0, @@ -68,7 +68,7 @@ define @intrinsic_vmfgt_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f16_f16 -; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfgt.mask.nxv2f16.f16( %0, @@ -108,7 +108,7 @@ define @intrinsic_vmfgt_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfgt.mask.nxv4f16.f16( %0, @@ -148,7 +148,7 @@ define @intrinsic_vmfgt_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfgt.mask.nxv8f16.f16( %0, @@ -188,7 +188,7 @@ define @intrinsic_vmfgt_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfgt.mask.nxv16f16.f16( %0, @@ -228,7 +228,7 @@ define @intrinsic_vmfgt_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfgt.mask.nxv1f32.f32( %0, @@ -268,7 +268,7 @@ define @intrinsic_vmfgt_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfgt.mask.nxv2f32.f32( %0, @@ -308,7 +308,7 @@ define @intrinsic_vmfgt_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfgt.mask.nxv4f32.f32( %0, @@ -348,7 +348,7 @@ define @intrinsic_vmfgt_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfgt.mask.nxv8f32.f32( %0, @@ -388,7 +388,7 @@ define @intrinsic_vmfgt_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfgt.mask.nxv1f64.f64( %0, @@ -428,7 +428,7 @@ define @intrinsic_vmfgt_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmfgt.vf {{v[0-9]+}}, 
{{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfgt.mask.nxv2f64.f64( %0, @@ -468,7 +468,7 @@ define @intrinsic_vmfgt_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfgt.mask.nxv4f64.f64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfle.nxv1f16( %1, @@ -72,7 +72,7 @@ define @intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfle.nxv2f16( %1, @@ -116,7 +116,7 @@ define @intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfle.nxv4f16( %1, @@ -160,7 +160,7 @@ define @intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfle.nxv8f16( %1, @@ -204,7 +204,7 @@ define @intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfle.nxv16f16( %1, @@ -248,7 +248,7 @@ define @intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfle.nxv1f32( %1, @@ -292,7 +292,7 @@ define @intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfle.nxv2f32( %1, @@ -336,7 +336,7 @@ define @intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e32,m2,tu,mu ; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfle.nxv4f32( %1, @@ -380,7 +380,7 @@ define @intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfle.nxv8f32( %1, @@ -424,7 +424,7 @@ define @intrinsic_vmfle_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfle.mask.nxv1f16.f16( %0, @@ -464,7 +464,7 @@ define @intrinsic_vmfle_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfle.mask.nxv2f16.f16( %0, @@ -504,7 +504,7 @@ define @intrinsic_vmfle_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfle.mask.nxv4f16.f16( %0, @@ -544,7 +544,7 @@ define @intrinsic_vmfle_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfle.mask.nxv8f16.f16( %0, @@ -584,7 +584,7 @@ define @intrinsic_vmfle_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfle.mask.nxv16f16.f16( %0, @@ -624,7 +624,7 @@ define @intrinsic_vmfle_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfle.mask.nxv1f32.f32( %0, @@ -664,7 +664,7 @@ define @intrinsic_vmfle_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfle.mask.nxv2f32.f32( %0, @@ -704,7 +704,7 @@ define @intrinsic_vmfle_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfle.mask.nxv4f32.f32( %0, @@ 
-744,7 +744,7 @@ define @intrinsic_vmfle_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfle.mask.nxv8f32.f32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfle-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfle-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfle-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfle-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfle.nxv1f16( %1, @@ -72,7 +72,7 @@ define @intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfle.nxv2f16( %1, @@ -116,7 +116,7 @@ define @intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfle.nxv4f16( %1, @@ -160,7 +160,7 @@ define @intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfle.nxv8f16( %1, @@ -204,7 +204,7 @@ define @intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfle.nxv16f16( %1, @@ -248,7 +248,7 @@ define @intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfle.nxv1f32( %1, @@ -292,7 +292,7 @@ define @intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfle.nxv2f32( %1, @@ -336,7 +336,7 @@ define @intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = 
call @llvm.riscv.vmfle.nxv4f32( %1, @@ -380,7 +380,7 @@ define @intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfle.nxv8f32( %1, @@ -424,7 +424,7 @@ define @intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfle.nxv1f64( %1, @@ -468,7 +468,7 @@ define @intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfle.nxv2f64( %1, @@ -512,7 +512,7 @@ define @intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfle.nxv4f64( %1, @@ -556,7 +556,7 @@ define @intrinsic_vmfle_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfle.mask.nxv1f16.f16( %0, @@ -596,7 +596,7 @@ define @intrinsic_vmfle_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfle.mask.nxv2f16.f16( %0, @@ -636,7 +636,7 @@ define @intrinsic_vmfle_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfle.mask.nxv4f16.f16( %0, @@ -676,7 +676,7 @@ define @intrinsic_vmfle_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfle.mask.nxv8f16.f16( %0, @@ -716,7 +716,7 @@ define @intrinsic_vmfle_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfle.mask.nxv16f16.f16( %0, @@ -756,7 +756,7 @@ define @intrinsic_vmfle_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { 
entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfle.mask.nxv1f32.f32( %0, @@ -796,7 +796,7 @@ define @intrinsic_vmfle_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfle.mask.nxv2f32.f32( %0, @@ -836,7 +836,7 @@ define @intrinsic_vmfle_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfle.mask.nxv4f32.f32( %0, @@ -876,7 +876,7 @@ define @intrinsic_vmfle_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfle.mask.nxv8f32.f32( %0, @@ -916,7 +916,7 @@ define @intrinsic_vmfle_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfle.mask.nxv1f64.f64( %0, @@ -956,7 +956,7 @@ define @intrinsic_vmfle_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfle.mask.nxv2f64.f64( %0, @@ -996,7 +996,7 @@ define @intrinsic_vmfle_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfle.mask.nxv4f64.f64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmflt.nxv1f16( %1, @@ -72,7 +72,7 @@ define @intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmflt.nxv2f16( %1, @@ -116,7 +116,7 @@ define 
@intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmflt.nxv4f16( %1, @@ -160,7 +160,7 @@ define @intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmflt.nxv8f16( %1, @@ -204,7 +204,7 @@ define @intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmflt.nxv16f16( %1, @@ -248,7 +248,7 @@ define @intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmflt.nxv1f32( %1, @@ -292,7 +292,7 @@ define @intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmflt.nxv2f32( %1, @@ -336,7 +336,7 @@ define @intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmflt.nxv4f32( %1, @@ -380,7 +380,7 @@ define @intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmflt.nxv8f32( %1, @@ -424,7 +424,7 @@ define @intrinsic_vmflt_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmflt.mask.nxv1f16.f16( %0, @@ -464,7 +464,7 @@ define @intrinsic_vmflt_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmflt.mask.nxv2f16.f16( %0, @@ -504,7 +504,7 @@ define @intrinsic_vmflt_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f16_f16 -; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmflt.mask.nxv4f16.f16( %0, @@ -544,7 +544,7 @@ define @intrinsic_vmflt_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmflt.mask.nxv8f16.f16( %0, @@ -584,7 +584,7 @@ define @intrinsic_vmflt_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmflt.mask.nxv16f16.f16( %0, @@ -624,7 +624,7 @@ define @intrinsic_vmflt_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmflt.mask.nxv1f32.f32( %0, @@ -664,7 +664,7 @@ define @intrinsic_vmflt_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmflt.mask.nxv2f32.f32( %0, @@ -704,7 +704,7 @@ define @intrinsic_vmflt_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmflt.mask.nxv4f32.f32( %0, @@ -744,7 +744,7 @@ define @intrinsic_vmflt_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmflt.mask.nxv8f32.f32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmflt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmflt-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmflt-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmflt-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmflt.nxv1f16( %1, @@ -72,7 +72,7 @@ define @intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmflt.nxv2f16( %1, @@ -116,7 +116,7 @@ define @intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { entry: 
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmflt.nxv4f16( %1, @@ -160,7 +160,7 @@ define @intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmflt.nxv8f16( %1, @@ -204,7 +204,7 @@ define @intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmflt.nxv16f16( %1, @@ -248,7 +248,7 @@ define @intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmflt.nxv1f32( %1, @@ -292,7 +292,7 @@ define @intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmflt.nxv2f32( %1, @@ -336,7 +336,7 @@ define @intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmflt.nxv4f32( %1, @@ -380,7 +380,7 @@ define @intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmflt.nxv8f32( %1, @@ -424,7 +424,7 @@ define @intrinsic_vmflt_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f64_nxv1f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmflt.nxv1f64( %1, @@ -468,7 +468,7 @@ define @intrinsic_vmflt_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f64_nxv2f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmflt.nxv2f64( %1, @@ -512,7 +512,7 @@ define @intrinsic_vmflt_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f64_nxv4f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; 
CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmflt.nxv4f64( %1, @@ -556,7 +556,7 @@ define @intrinsic_vmflt_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmflt.mask.nxv1f16.f16( %0, @@ -596,7 +596,7 @@ define @intrinsic_vmflt_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmflt.mask.nxv2f16.f16( %0, @@ -636,7 +636,7 @@ define @intrinsic_vmflt_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmflt.mask.nxv4f16.f16( %0, @@ -676,7 +676,7 @@ define @intrinsic_vmflt_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmflt.mask.nxv8f16.f16( %0, @@ -716,7 +716,7 @@ define @intrinsic_vmflt_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmflt.mask.nxv16f16.f16( %0, @@ -756,7 +756,7 @@ define @intrinsic_vmflt_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmflt.mask.nxv1f32.f32( %0, @@ -796,7 +796,7 @@ define @intrinsic_vmflt_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmflt.mask.nxv2f32.f32( %0, @@ -836,7 +836,7 @@ define @intrinsic_vmflt_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmflt.mask.nxv4f32.f32( %0, @@ -876,7 +876,7 @@ define @intrinsic_vmflt_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmflt.mask.nxv8f32.f32( %0, @@ -916,7 +916,7 
@@ define @intrinsic_vmflt_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmflt.mask.nxv1f64.f64( %0, @@ -956,7 +956,7 @@ define @intrinsic_vmflt_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmflt.mask.nxv2f64.f64( %0, @@ -996,7 +996,7 @@ define @intrinsic_vmflt_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmflt.mask.nxv4f64.f64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfne.nxv1f16( %1, @@ -72,7 +72,7 @@ define @intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfne.nxv2f16( %1, @@ -116,7 +116,7 @@ define @intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfne.nxv4f16( %1, @@ -160,7 +160,7 @@ define @intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfne.nxv8f16( %1, @@ -204,7 +204,7 @@ define @intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfne.nxv16f16( %1, @@ -248,7 +248,7 @@ define @intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = 
call @llvm.riscv.vmfne.nxv1f32( %1, @@ -292,7 +292,7 @@ define @intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfne.nxv2f32( %1, @@ -336,7 +336,7 @@ define @intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfne.nxv4f32( %1, @@ -380,7 +380,7 @@ define @intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfne.nxv8f32( %1, @@ -424,7 +424,7 @@ define @intrinsic_vmfne_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfne.mask.nxv1f16.f16( %0, @@ -464,7 +464,7 @@ define @intrinsic_vmfne_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfne.mask.nxv2f16.f16( %0, @@ -504,7 +504,7 @@ define @intrinsic_vmfne_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfne.mask.nxv4f16.f16( %0, @@ -544,7 +544,7 @@ define @intrinsic_vmfne_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfne.mask.nxv8f16.f16( %0, @@ -584,7 +584,7 @@ define @intrinsic_vmfne_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfne.mask.nxv16f16.f16( %0, @@ -624,7 +624,7 @@ define @intrinsic_vmfne_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfne.mask.nxv1f32.f32( %0, @@ -664,7 +664,7 @@ define @intrinsic_vmfne_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) 
nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfne.mask.nxv2f32.f32( %0, @@ -704,7 +704,7 @@ define @intrinsic_vmfne_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfne.mask.nxv4f32.f32( %0, @@ -744,7 +744,7 @@ define @intrinsic_vmfne_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfne.mask.nxv8f32.f32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfne-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfne-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfne-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfne-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfne.nxv1f16( %1, @@ -72,7 +72,7 @@ define @intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfne.nxv2f16( %1, @@ -116,7 +116,7 @@ define @intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfne.nxv4f16( %1, @@ -160,7 +160,7 @@ define @intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfne.nxv8f16( %1, @@ -204,7 +204,7 @@ define @intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfne.nxv16f16( %1, @@ -248,7 +248,7 @@ define @intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfne.nxv1f32( %1, @@ -292,7 +292,7 @@ define 
@intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfne.nxv2f32( %1, @@ -336,7 +336,7 @@ define @intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfne.nxv4f32( %1, @@ -380,7 +380,7 @@ define @intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfne.nxv8f32( %1, @@ -424,7 +424,7 @@ define @intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfne.nxv1f64( %1, @@ -468,7 +468,7 @@ define @intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfne.nxv2f64( %1, @@ -512,7 +512,7 @@ define @intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmfne.nxv4f64( %1, @@ -556,7 +556,7 @@ define @intrinsic_vmfne_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfne.mask.nxv1f16.f16( %0, @@ -596,7 +596,7 @@ define @intrinsic_vmfne_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfne.mask.nxv2f16.f16( %0, @@ -636,7 +636,7 @@ define @intrinsic_vmfne_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfne.mask.nxv4f16.f16( %0, @@ -676,7 +676,7 @@ define @intrinsic_vmfne_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8f16_f16 -; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfne.mask.nxv8f16.f16( %0, @@ -716,7 +716,7 @@ define @intrinsic_vmfne_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv16f16_f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfne.mask.nxv16f16.f16( %0, @@ -756,7 +756,7 @@ define @intrinsic_vmfne_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfne.mask.nxv1f32.f32( %0, @@ -796,7 +796,7 @@ define @intrinsic_vmfne_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfne.mask.nxv2f32.f32( %0, @@ -836,7 +836,7 @@ define @intrinsic_vmfne_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfne.mask.nxv4f32.f32( %0, @@ -876,7 +876,7 @@ define @intrinsic_vmfne_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8f32_f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfne.mask.nxv8f32.f32( %0, @@ -916,7 +916,7 @@ define @intrinsic_vmfne_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfne.mask.nxv1f64.f64( %0, @@ -956,7 +956,7 @@ define @intrinsic_vmfne_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfne.mask.nxv2f64.f64( %0, @@ -996,7 +996,7 @@ define @intrinsic_vmfne_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f64_f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t %a = call @llvm.riscv.vmfne.mask.nxv4f64.f64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { 
entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vmin_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vmin_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vmin_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vmin_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vmin_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vmin_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vmin_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vmin_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: 
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vmin_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vmin_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vmin_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vmin_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vmin_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vmin_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define 
@intrinsic_vmin_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv1i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vmin_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv2i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vmin_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv4i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vmin_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv8i8.i8( %0, @@ -908,7 +908,7 @@ define @intrinsic_vmin_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv16i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vmin_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv32i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vmin_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv64i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vmin_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv1i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vmin_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv2i16.i16( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vmin_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: 
; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv4i16.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vmin_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv8i16.i16( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vmin_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv16i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vmin_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv32i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vmin_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv1i32.i32( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vmin_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv2i32.i32( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vmin_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv4i32.i32( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vmin_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv8i32.i32( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vmin_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv16i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vmin_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vmin_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vmin_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vmin_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vmin_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vmin_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vmin_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, 
{{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vmin_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vmin_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vmin_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vmin_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vmin_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vmin_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vmin_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vmin_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv1i64.nxv1i64( %0, @@ -788,7 +788,7 @@ define @intrinsic_vmin_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv2i64.nxv2i64( %0, @@ -828,7 +828,7 @@ define @intrinsic_vmin_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv4i64.nxv4i64( %0, @@ -868,7 +868,7 @@ define @intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv8i64.nxv8i64( %0, @@ -908,7 +908,7 @@ define @intrinsic_vmin_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv1i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vmin_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv2i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vmin_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv4i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vmin_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv8i8.i8( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vmin_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmin.vx 
{{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv16i8.i8( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vmin_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv32i8.i8( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vmin_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv64i8.i8( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vmin_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv1i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vmin_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv2i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vmin_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv4i16.i16( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vmin_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv8i16.i16( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vmin_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv16i16.i16( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vmin_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv32i16.i16( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vmin_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, 
{{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv1i32.i32( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vmin_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv2i32.i32( %0, @@ -1508,7 +1508,7 @@ define @intrinsic_vmin_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv4i32.i32( %0, @@ -1548,7 +1548,7 @@ define @intrinsic_vmin_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv8i32.i32( %0, @@ -1588,7 +1588,7 @@ define @intrinsic_vmin_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv16i32.i32( %0, @@ -1628,7 +1628,7 @@ define @intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv1i64.i64( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv2i64.i64( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv4i64.i64( %0, @@ -1748,7 +1748,7 @@ define @intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmin.mask.nxv8i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vminu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vminu_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vminu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vminu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vminu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vminu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vminu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vminu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vminu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vminu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) 
nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vminu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vminu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vminu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vminu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vminu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vminu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv16i32.nxv16i32( %0, @@ -748,7 
+748,7 @@ define @intrinsic_vminu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv1i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vminu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv2i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vminu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv4i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vminu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv8i8.i8( %0, @@ -908,7 +908,7 @@ define @intrinsic_vminu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv16i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vminu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv32i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vminu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv64i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vminu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv1i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vminu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv2i16.i16( %0, @@ -1108,7 +1108,7 @@ define 
@intrinsic_vminu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv4i16.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vminu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv8i16.i16( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vminu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv16i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vminu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv32i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vminu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv1i32.i32( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vminu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv2i32.i32( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vminu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv4i32.i32( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vminu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv8i32.i32( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vminu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv16i32.i32( %0, 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vminu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vminu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vminu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vminu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vminu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vminu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vminu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vminu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vminu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vminu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vminu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vminu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vminu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vminu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vminu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 
%4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vminu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv1i64.nxv1i64( %0, @@ -788,7 +788,7 @@ define @intrinsic_vminu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv2i64.nxv2i64( %0, @@ -828,7 +828,7 @@ define @intrinsic_vminu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv4i64.nxv4i64( %0, @@ -868,7 +868,7 @@ define @intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv8i64.nxv8i64( %0, @@ -908,7 +908,7 @@ define @intrinsic_vminu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv1i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vminu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv2i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vminu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv4i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vminu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv8i8.i8( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vminu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 
%4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv16i8.i8( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vminu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv32i8.i8( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vminu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv64i8.i8( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vminu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv1i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vminu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv2i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vminu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv4i16.i16( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vminu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv8i16.i16( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vminu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv16i16.i16( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vminu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv32i16.i16( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vminu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, 
%3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv1i32.i32( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vminu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv2i32.i32( %0, @@ -1508,7 +1508,7 @@ define @intrinsic_vminu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv4i32.i32( %0, @@ -1548,7 +1548,7 @@ define @intrinsic_vminu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv8i32.i32( %0, @@ -1588,7 +1588,7 @@ define @intrinsic_vminu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv16i32.i32( %0, @@ -1628,7 +1628,7 @@ define @intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv1i64.i64( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv2i64.i64( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv4i64.i64( %0, @@ -1748,7 +1748,7 @@ define @intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vminu.mask.nxv8i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll 
b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll @@ -29,7 +29,7 @@ ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmsbf.m v25, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -71,7 +71,7 @@ ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmsbf.m v25, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -113,7 +113,7 @@ ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmsbf.m v25, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -155,7 +155,7 @@ ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmsbf.m v25, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -197,7 +197,7 @@ ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmsbf.m v25, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -239,7 +239,7 @@ ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmsbf.m v25, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -281,7 +281,7 @@ ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m8,tu,mu ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmsbf.m v25, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll @@ -29,7 +29,7 @@ ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmsbf.m v25, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -71,7 +71,7 @@ ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmsbf.m v25, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -113,7 +113,7 @@ ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmsbf.m v25, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -155,7 +155,7 @@ ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1: ; 
CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmsbf.m v25, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -197,7 +197,7 @@ ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmsbf.m v25, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -239,7 +239,7 @@ ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmsbf.m v25, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -281,7 +281,7 @@ ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m8,tu,mu ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmsbf.m v25, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmseq.nxv1i8( %1, @@ -72,7 +72,7 @@ define @intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmseq.nxv2i8( %1, @@ -116,7 +116,7 @@ define @intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmseq.nxv4i8( %1, @@ -160,7 +160,7 @@ define @intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmseq.nxv8i8( %1, @@ -204,7 +204,7 @@ define @intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmseq.nxv16i8( %1, @@ -248,7 +248,7 @@ define @intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, 
v0.t %mask = call @llvm.riscv.vmseq.nxv32i8( %1, @@ -292,7 +292,7 @@ define @intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmseq.nxv1i16( %1, @@ -336,7 +336,7 @@ define @intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmseq.nxv2i16( %1, @@ -380,7 +380,7 @@ define @intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmseq.nxv4i16( %1, @@ -424,7 +424,7 @@ define @intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmseq.nxv8i16( %1, @@ -468,7 +468,7 @@ define @intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmseq.nxv16i16( %1, @@ -512,7 +512,7 @@ define @intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmseq.nxv1i32( %1, @@ -556,7 +556,7 @@ define @intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmseq.nxv2i32( %1, @@ -600,7 +600,7 @@ define @intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmseq.nxv4i32( %1, @@ -644,7 +644,7 @@ define @intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmseq.nxv8i32( %1, @@ -688,7 +688,7 @@ define @intrinsic_vmseq_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; 
CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmseq.mask.nxv1i8.i8( %0, @@ -728,7 +728,7 @@ define @intrinsic_vmseq_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmseq.mask.nxv2i8.i8( %0, @@ -768,7 +768,7 @@ define @intrinsic_vmseq_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmseq.mask.nxv4i8.i8( %0, @@ -808,7 +808,7 @@ define @intrinsic_vmseq_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmseq.mask.nxv8i8.i8( %0, @@ -848,7 +848,7 @@ define @intrinsic_vmseq_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmseq.mask.nxv16i8.i8( %0, @@ -888,7 +888,7 @@ define @intrinsic_vmseq_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmseq.mask.nxv32i8.i8( %0, @@ -928,7 +928,7 @@ define @intrinsic_vmseq_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmseq.mask.nxv1i16.i16( %0, @@ -968,7 +968,7 @@ define @intrinsic_vmseq_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmseq.mask.nxv2i16.i16( %0, @@ -1008,7 +1008,7 @@ define @intrinsic_vmseq_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmseq.mask.nxv4i16.i16( %0, @@ -1048,7 +1048,7 @@ define @intrinsic_vmseq_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmseq.vx {{v[0-9]+}}, 
{{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmseq.mask.nxv8i16.i16( %0, @@ -1088,7 +1088,7 @@ define @intrinsic_vmseq_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmseq.mask.nxv16i16.i16( %0, @@ -1128,7 +1128,7 @@ define @intrinsic_vmseq_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmseq.mask.nxv1i32.i32( %0, @@ -1168,7 +1168,7 @@ define @intrinsic_vmseq_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmseq.mask.nxv2i32.i32( %0, @@ -1208,7 +1208,7 @@ define @intrinsic_vmseq_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmseq.mask.nxv4i32.i32( %0, @@ -1248,7 +1248,7 @@ define @intrinsic_vmseq_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmseq.mask.nxv8i32.i32( %0, @@ -1276,7 +1276,7 @@ define @intrinsic_vmseq_mask_vi_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmseq.mask.nxv1i8.i8( %0, @@ -1304,7 +1304,7 @@ define @intrinsic_vmseq_mask_vi_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmseq.mask.nxv2i8.i8( %0, @@ -1332,7 +1332,7 @@ define @intrinsic_vmseq_mask_vi_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmseq.mask.nxv4i8.i8( %0, @@ -1360,7 +1360,7 @@ define @intrinsic_vmseq_mask_vi_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmseq.mask.nxv8i8.i8( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vmseq_mask_vi_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vmseq_mask_vi_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmseq.mask.nxv16i8.i8( %0, @@ -1416,7 +1416,7 @@ define @intrinsic_vmseq_mask_vi_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmseq.mask.nxv32i8.i8( %0, @@ -1444,7 +1444,7 @@ define @intrinsic_vmseq_mask_vi_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmseq.mask.nxv1i16.i16( %0, @@ -1472,7 +1472,7 @@ define @intrinsic_vmseq_mask_vi_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmseq.mask.nxv2i16.i16( %0, @@ -1500,7 +1500,7 @@ define @intrinsic_vmseq_mask_vi_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmseq.mask.nxv4i16.i16( %0, @@ -1528,7 +1528,7 @@ define @intrinsic_vmseq_mask_vi_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmseq.mask.nxv8i16.i16( %0, @@ -1556,7 +1556,7 @@ define @intrinsic_vmseq_mask_vi_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmseq.mask.nxv16i16.i16( %0, @@ -1584,7 +1584,7 @@ define @intrinsic_vmseq_mask_vi_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmseq.mask.nxv1i32.i32( %0, @@ -1612,7 +1612,7 @@ define @intrinsic_vmseq_mask_vi_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmseq.mask.nxv2i32.i32( %0, @@ -1640,7 +1640,7 @@ define @intrinsic_vmseq_mask_vi_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmseq.mask.nxv4i32.i32( %0, @@ -1668,7 +1668,7 @@ define 
@intrinsic_vmseq_mask_vi_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmseq.mask.nxv8i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmseq.nxv1i8( %1, @@ -72,7 +72,7 @@ define @intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmseq.nxv2i8( %1, @@ -116,7 +116,7 @@ define @intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmseq.nxv4i8( %1, @@ -160,7 +160,7 @@ define @intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmseq.nxv8i8( %1, @@ -204,7 +204,7 @@ define @intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmseq.nxv16i8( %1, @@ -248,7 +248,7 @@ define @intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmseq.nxv32i8( %1, @@ -292,7 +292,7 @@ define @intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmseq.nxv1i16( %1, @@ -336,7 +336,7 @@ define @intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmseq.nxv2i16( %1, @@ -380,7 +380,7 @@ define 
@intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmseq.nxv4i16( %1, @@ -424,7 +424,7 @@ define @intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmseq.nxv8i16( %1, @@ -468,7 +468,7 @@ define @intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmseq.nxv16i16( %1, @@ -512,7 +512,7 @@ define @intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmseq.nxv1i32( %1, @@ -556,7 +556,7 @@ define @intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmseq.nxv2i32( %1, @@ -600,7 +600,7 @@ define @intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmseq.nxv4i32( %1, @@ -644,7 +644,7 @@ define @intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmseq.nxv8i32( %1, @@ -688,7 +688,7 @@ define @intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmseq.nxv1i64( %1, @@ -732,7 +732,7 @@ define @intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmseq.nxv2i64( %1, @@ -776,7 +776,7 @@ define @intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64 -; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmseq.nxv4i64( %1, @@ -820,7 +820,7 @@ define @intrinsic_vmseq_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmseq.mask.nxv1i8.i8( %0, @@ -860,7 +860,7 @@ define @intrinsic_vmseq_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmseq.mask.nxv2i8.i8( %0, @@ -900,7 +900,7 @@ define @intrinsic_vmseq_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmseq.mask.nxv4i8.i8( %0, @@ -940,7 +940,7 @@ define @intrinsic_vmseq_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmseq.mask.nxv8i8.i8( %0, @@ -980,7 +980,7 @@ define @intrinsic_vmseq_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmseq.mask.nxv16i8.i8( %0, @@ -1020,7 +1020,7 @@ define @intrinsic_vmseq_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmseq.mask.nxv32i8.i8( %0, @@ -1060,7 +1060,7 @@ define @intrinsic_vmseq_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmseq.mask.nxv1i16.i16( %0, @@ -1100,7 +1100,7 @@ define @intrinsic_vmseq_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmseq.mask.nxv2i16.i16( %0, @@ -1140,7 +1140,7 @@ define @intrinsic_vmseq_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call 
@llvm.riscv.vmseq.mask.nxv4i16.i16( %0, @@ -1180,7 +1180,7 @@ define @intrinsic_vmseq_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmseq.mask.nxv8i16.i16( %0, @@ -1220,7 +1220,7 @@ define @intrinsic_vmseq_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmseq.mask.nxv16i16.i16( %0, @@ -1260,7 +1260,7 @@ define @intrinsic_vmseq_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmseq.mask.nxv1i32.i32( %0, @@ -1300,7 +1300,7 @@ define @intrinsic_vmseq_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmseq.mask.nxv2i32.i32( %0, @@ -1340,7 +1340,7 @@ define @intrinsic_vmseq_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmseq.mask.nxv4i32.i32( %0, @@ -1380,7 +1380,7 @@ define @intrinsic_vmseq_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmseq.mask.nxv8i32.i32( %0, @@ -1420,7 +1420,7 @@ define @intrinsic_vmseq_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmseq.mask.nxv1i64.i64( %0, @@ -1460,7 +1460,7 @@ define @intrinsic_vmseq_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmseq.mask.nxv2i64.i64( %0, @@ -1500,7 +1500,7 @@ define @intrinsic_vmseq_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmseq.mask.nxv4i64.i64( %0, @@ -1528,7 +1528,7 @@ define @intrinsic_vmseq_mask_vi_nxv1i8_i8( %0, %1, %2, i64 %3) 
nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmseq.mask.nxv1i8.i8( %0, @@ -1556,7 +1556,7 @@ define @intrinsic_vmseq_mask_vi_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmseq.mask.nxv2i8.i8( %0, @@ -1584,7 +1584,7 @@ define @intrinsic_vmseq_mask_vi_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmseq.mask.nxv4i8.i8( %0, @@ -1612,7 +1612,7 @@ define @intrinsic_vmseq_mask_vi_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmseq.mask.nxv8i8.i8( %0, @@ -1640,7 +1640,7 @@ define @intrinsic_vmseq_mask_vi_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmseq.mask.nxv16i8.i8( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vmseq_mask_vi_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmseq.mask.nxv32i8.i8( %0, @@ -1696,7 +1696,7 @@ define @intrinsic_vmseq_mask_vi_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmseq.mask.nxv1i16.i16( %0, @@ -1724,7 +1724,7 @@ define @intrinsic_vmseq_mask_vi_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmseq.mask.nxv2i16.i16( %0, @@ -1752,7 +1752,7 @@ define @intrinsic_vmseq_mask_vi_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmseq.mask.nxv4i16.i16( %0, @@ -1780,7 +1780,7 @@ define @intrinsic_vmseq_mask_vi_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmseq.mask.nxv8i16.i16( %0, @@ -1808,7 +1808,7 @@ define 
@intrinsic_vmseq_mask_vi_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmseq.mask.nxv16i16.i16( %0, @@ -1836,7 +1836,7 @@ define @intrinsic_vmseq_mask_vi_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmseq.mask.nxv1i32.i32( %0, @@ -1864,7 +1864,7 @@ define @intrinsic_vmseq_mask_vi_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmseq.mask.nxv2i32.i32( %0, @@ -1892,7 +1892,7 @@ define @intrinsic_vmseq_mask_vi_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmseq.mask.nxv4i32.i32( %0, @@ -1920,7 +1920,7 @@ define @intrinsic_vmseq_mask_vi_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmseq.mask.nxv8i32.i32( %0, @@ -1948,7 +1948,7 @@ define @intrinsic_vmseq_mask_vi_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmseq.mask.nxv1i64.i64( %0, @@ -1976,7 +1976,7 @@ define @intrinsic_vmseq_mask_vi_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmseq.mask.nxv2i64.i64( %0, @@ -2004,7 +2004,7 @@ define @intrinsic_vmseq_mask_vi_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmseq.mask.nxv4i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vmsgt_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv1i8.i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vmsgt_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; 
CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv2i8.i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vmsgt_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv4i8.i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vmsgt_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv8i8.i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vmsgt_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv16i8.i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vmsgt_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv32i8.i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vmsgt_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv1i16.i16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vmsgt_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv2i16.i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vmsgt_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv4i16.i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vmsgt_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv8i16.i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vmsgt_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmsgt.vx 
{{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv16i16.i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vmsgt_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv1i32.i32( %0, @@ -508,7 +508,7 @@ define @intrinsic_vmsgt_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv2i32.i32( %0, @@ -548,7 +548,7 @@ define @intrinsic_vmsgt_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv4i32.i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vmsgt_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv8i32.i32( %0, @@ -616,7 +616,7 @@ define @intrinsic_vmsgt_mask_vi_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv1i8.i8( %0, @@ -644,7 +644,7 @@ define @intrinsic_vmsgt_mask_vi_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv2i8.i8( %0, @@ -672,7 +672,7 @@ define @intrinsic_vmsgt_mask_vi_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv4i8.i8( %0, @@ -700,7 +700,7 @@ define @intrinsic_vmsgt_mask_vi_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv8i8.i8( %0, @@ -728,7 +728,7 @@ define @intrinsic_vmsgt_mask_vi_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv16i8.i8( %0, @@ -756,7 +756,7 @@ define @intrinsic_vmsgt_mask_vi_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv32i8_i8 -; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv32i8.i8( %0, @@ -784,7 +784,7 @@ define @intrinsic_vmsgt_mask_vi_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv1i16.i16( %0, @@ -812,7 +812,7 @@ define @intrinsic_vmsgt_mask_vi_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv2i16.i16( %0, @@ -840,7 +840,7 @@ define @intrinsic_vmsgt_mask_vi_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv4i16.i16( %0, @@ -868,7 +868,7 @@ define @intrinsic_vmsgt_mask_vi_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv8i16.i16( %0, @@ -896,7 +896,7 @@ define @intrinsic_vmsgt_mask_vi_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv16i16.i16( %0, @@ -924,7 +924,7 @@ define @intrinsic_vmsgt_mask_vi_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv1i32.i32( %0, @@ -952,7 +952,7 @@ define @intrinsic_vmsgt_mask_vi_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv2i32.i32( %0, @@ -980,7 +980,7 @@ define @intrinsic_vmsgt_mask_vi_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv4i32.i32( %0, @@ -1008,7 +1008,7 @@ define @intrinsic_vmsgt_mask_vi_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv8i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vmsgt_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv1i8.i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vmsgt_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv2i8.i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vmsgt_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv4i8.i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vmsgt_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv8i8.i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vmsgt_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv16i8.i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vmsgt_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv32i8.i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vmsgt_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv1i16.i16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vmsgt_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv2i16.i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vmsgt_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv4i16.i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vmsgt_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind 
{ entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv8i16.i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vmsgt_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv16i16.i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vmsgt_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv1i32.i32( %0, @@ -508,7 +508,7 @@ define @intrinsic_vmsgt_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv2i32.i32( %0, @@ -548,7 +548,7 @@ define @intrinsic_vmsgt_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv4i32.i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vmsgt_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv8i32.i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vmsgt_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv1i64.i64( %0, @@ -668,7 +668,7 @@ define @intrinsic_vmsgt_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv2i64.i64( %0, @@ -708,7 +708,7 @@ define @intrinsic_vmsgt_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv4i64.i64( %0, @@ -736,7 +736,7 @@ define @intrinsic_vmsgt_mask_vi_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e8,mf8,tu,mu ; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv1i8.i8( %0, @@ -764,7 +764,7 @@ define @intrinsic_vmsgt_mask_vi_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv2i8.i8( %0, @@ -792,7 +792,7 @@ define @intrinsic_vmsgt_mask_vi_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv4i8.i8( %0, @@ -820,7 +820,7 @@ define @intrinsic_vmsgt_mask_vi_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv8i8.i8( %0, @@ -848,7 +848,7 @@ define @intrinsic_vmsgt_mask_vi_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv16i8.i8( %0, @@ -876,7 +876,7 @@ define @intrinsic_vmsgt_mask_vi_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv32i8.i8( %0, @@ -904,7 +904,7 @@ define @intrinsic_vmsgt_mask_vi_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv1i16.i16( %0, @@ -932,7 +932,7 @@ define @intrinsic_vmsgt_mask_vi_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv2i16.i16( %0, @@ -960,7 +960,7 @@ define @intrinsic_vmsgt_mask_vi_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv4i16.i16( %0, @@ -988,7 +988,7 @@ define @intrinsic_vmsgt_mask_vi_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv8i16.i16( %0, @@ -1016,7 +1016,7 @@ define @intrinsic_vmsgt_mask_vi_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv16i16.i16( %0, @@ -1044,7 +1044,7 @@ define @intrinsic_vmsgt_mask_vi_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv1i32.i32( %0, @@ -1072,7 +1072,7 @@ define @intrinsic_vmsgt_mask_vi_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv2i32.i32( %0, @@ -1100,7 +1100,7 @@ define @intrinsic_vmsgt_mask_vi_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv4i32.i32( %0, @@ -1128,7 +1128,7 @@ define @intrinsic_vmsgt_mask_vi_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv8i32.i32( %0, @@ -1156,7 +1156,7 @@ define @intrinsic_vmsgt_mask_vi_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv1i64.i64( %0, @@ -1184,7 +1184,7 @@ define @intrinsic_vmsgt_mask_vi_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv2i64.i64( %0, @@ -1212,7 +1212,7 @@ define @intrinsic_vmsgt_mask_vi_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgt.mask.nxv4i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vmsgtu_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv1i8.i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vmsgtu_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, 
{{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv2i8.i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vmsgtu_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv4i8.i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vmsgtu_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv8i8.i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vmsgtu_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv16i8.i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vmsgtu_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv32i8.i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vmsgtu_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv1i16.i16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vmsgtu_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv2i16.i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vmsgtu_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv4i16.i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vmsgtu_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv8i16.i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vmsgtu_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv16i16.i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vmsgtu_mask_vx_nxv1i32_i32( %0, 
%1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv1i32.i32( %0, @@ -508,7 +508,7 @@ define @intrinsic_vmsgtu_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv2i32.i32( %0, @@ -548,7 +548,7 @@ define @intrinsic_vmsgtu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv4i32.i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vmsgtu_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv8i32.i32( %0, @@ -616,7 +616,7 @@ define @intrinsic_vmsgtu_mask_vi_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv1i8.i8( %0, @@ -644,7 +644,7 @@ define @intrinsic_vmsgtu_mask_vi_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv2i8.i8( %0, @@ -672,7 +672,7 @@ define @intrinsic_vmsgtu_mask_vi_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv4i8.i8( %0, @@ -700,7 +700,7 @@ define @intrinsic_vmsgtu_mask_vi_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv8i8.i8( %0, @@ -728,7 +728,7 @@ define @intrinsic_vmsgtu_mask_vi_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv16i8.i8( %0, @@ -756,7 +756,7 @@ define @intrinsic_vmsgtu_mask_vi_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, 
v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv32i8.i8( %0, @@ -784,7 +784,7 @@ define @intrinsic_vmsgtu_mask_vi_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv1i16.i16( %0, @@ -812,7 +812,7 @@ define @intrinsic_vmsgtu_mask_vi_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv2i16.i16( %0, @@ -840,7 +840,7 @@ define @intrinsic_vmsgtu_mask_vi_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv4i16.i16( %0, @@ -868,7 +868,7 @@ define @intrinsic_vmsgtu_mask_vi_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv8i16.i16( %0, @@ -896,7 +896,7 @@ define @intrinsic_vmsgtu_mask_vi_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv16i16.i16( %0, @@ -924,7 +924,7 @@ define @intrinsic_vmsgtu_mask_vi_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv1i32.i32( %0, @@ -952,7 +952,7 @@ define @intrinsic_vmsgtu_mask_vi_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv2i32.i32( %0, @@ -980,7 +980,7 @@ define @intrinsic_vmsgtu_mask_vi_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv4i32.i32( %0, @@ -1008,7 +1008,7 @@ define @intrinsic_vmsgtu_mask_vi_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv8i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll @@ -28,7 
+28,7 @@ define @intrinsic_vmsgtu_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv1i8.i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vmsgtu_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv2i8.i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vmsgtu_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv4i8.i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vmsgtu_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv8i8.i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vmsgtu_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv16i8.i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vmsgtu_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv32i8.i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vmsgtu_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv1i16.i16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vmsgtu_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv2i16.i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vmsgtu_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv4i16.i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vmsgtu_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i16_i16 -; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv8i16.i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vmsgtu_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv16i16.i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vmsgtu_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv1i32.i32( %0, @@ -508,7 +508,7 @@ define @intrinsic_vmsgtu_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv2i32.i32( %0, @@ -548,7 +548,7 @@ define @intrinsic_vmsgtu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv4i32.i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vmsgtu_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv8i32.i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vmsgtu_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv1i64.i64( %0, @@ -668,7 +668,7 @@ define @intrinsic_vmsgtu_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv2i64.i64( %0, @@ -708,7 +708,7 @@ define @intrinsic_vmsgtu_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv4i64.i64( %0, @@ -736,7 +736,7 @@ define @intrinsic_vmsgtu_mask_vi_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmsgtu.vi 
{{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv1i8.i8( %0, @@ -764,7 +764,7 @@ define @intrinsic_vmsgtu_mask_vi_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv2i8.i8( %0, @@ -792,7 +792,7 @@ define @intrinsic_vmsgtu_mask_vi_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv4i8.i8( %0, @@ -820,7 +820,7 @@ define @intrinsic_vmsgtu_mask_vi_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv8i8.i8( %0, @@ -848,7 +848,7 @@ define @intrinsic_vmsgtu_mask_vi_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv16i8.i8( %0, @@ -876,7 +876,7 @@ define @intrinsic_vmsgtu_mask_vi_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv32i8.i8( %0, @@ -904,7 +904,7 @@ define @intrinsic_vmsgtu_mask_vi_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv1i16.i16( %0, @@ -932,7 +932,7 @@ define @intrinsic_vmsgtu_mask_vi_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv2i16.i16( %0, @@ -960,7 +960,7 @@ define @intrinsic_vmsgtu_mask_vi_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv4i16.i16( %0, @@ -988,7 +988,7 @@ define @intrinsic_vmsgtu_mask_vi_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv8i16.i16( %0, @@ -1016,7 +1016,7 @@ define @intrinsic_vmsgtu_mask_vi_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv16i16.i16( %0, @@ -1044,7 +1044,7 @@ define @intrinsic_vmsgtu_mask_vi_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv1i32.i32( %0, @@ -1072,7 +1072,7 @@ define @intrinsic_vmsgtu_mask_vi_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv2i32.i32( %0, @@ -1100,7 +1100,7 @@ define @intrinsic_vmsgtu_mask_vi_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv4i32.i32( %0, @@ -1128,7 +1128,7 @@ define @intrinsic_vmsgtu_mask_vi_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv8i32.i32( %0, @@ -1156,7 +1156,7 @@ define @intrinsic_vmsgtu_mask_vi_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv1i64.i64( %0, @@ -1184,7 +1184,7 @@ define @intrinsic_vmsgtu_mask_vi_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv2i64.i64( %0, @@ -1212,7 +1212,7 @@ define @intrinsic_vmsgtu_mask_vi_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsgtu.mask.nxv4i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll @@ -29,7 +29,7 @@ ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv1i1_nxv1i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmsif.m v25, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -71,7 +71,7 @@ ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv2i1_nxv2i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmsif.m v25, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -113,7 +113,7 @@ ; 
CHECK-LABEL: intrinsic_vmsif_mask_m_nxv4i1_nxv4i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmsif.m v25, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -155,7 +155,7 @@ ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv8i1_nxv8i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmsif.m v25, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -197,7 +197,7 @@ ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv16i1_nxv16i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmsif.m v25, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -239,7 +239,7 @@ ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv32i1_nxv32i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmsif.m v25, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -281,7 +281,7 @@ ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv64i1_nxv64i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m8,tu,mu ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmsif.m v25, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll @@ -29,7 +29,7 @@ ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv1i1_nxv1i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmsif.m v25, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -71,7 +71,7 @@ ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv2i1_nxv2i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmsif.m v25, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -113,7 +113,7 @@ ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv4i1_nxv4i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmsif.m v25, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -155,7 +155,7 @@ ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv8i1_nxv8i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmsif.m v25, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -197,7 +197,7 @@ ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv16i1_nxv16i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmsif.m v25, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -239,7 +239,7 @@ ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv32i1_nxv32i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu ; CHECK-NEXT: vmv1r.v 
v0, v17 ; CHECK-NEXT: vmsif.m v25, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -281,7 +281,7 @@ ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv64i1_nxv64i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m8,tu,mu ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmsif.m v25, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsle.nxv1i8( %1, @@ -72,7 +72,7 @@ define @intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsle.nxv2i8( %1, @@ -116,7 +116,7 @@ define @intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsle.nxv4i8( %1, @@ -160,7 +160,7 @@ define @intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsle.nxv8i8( %1, @@ -204,7 +204,7 @@ define @intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsle.nxv16i8( %1, @@ -248,7 +248,7 @@ define @intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsle.nxv32i8( %1, @@ -292,7 +292,7 @@ define @intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsle.nxv1i16( %1, @@ -336,7 +336,7 @@ define @intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call 
@llvm.riscv.vmsle.nxv2i16( %1, @@ -380,7 +380,7 @@ define @intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsle.nxv4i16( %1, @@ -424,7 +424,7 @@ define @intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsle.nxv8i16( %1, @@ -468,7 +468,7 @@ define @intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsle.nxv16i16( %1, @@ -512,7 +512,7 @@ define @intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsle.nxv1i32( %1, @@ -556,7 +556,7 @@ define @intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsle.nxv2i32( %1, @@ -600,7 +600,7 @@ define @intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsle.nxv4i32( %1, @@ -644,7 +644,7 @@ define @intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsle.nxv8i32( %1, @@ -688,7 +688,7 @@ define @intrinsic_vmsle_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsle.mask.nxv1i8.i8( %0, @@ -728,7 +728,7 @@ define @intrinsic_vmsle_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsle.mask.nxv2i8.i8( %0, @@ -768,7 +768,7 @@ define @intrinsic_vmsle_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vmsle_mask_vx_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsle.mask.nxv4i8.i8( %0, @@ -808,7 +808,7 @@ define @intrinsic_vmsle_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsle.mask.nxv8i8.i8( %0, @@ -848,7 +848,7 @@ define @intrinsic_vmsle_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsle.mask.nxv16i8.i8( %0, @@ -888,7 +888,7 @@ define @intrinsic_vmsle_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsle.mask.nxv32i8.i8( %0, @@ -928,7 +928,7 @@ define @intrinsic_vmsle_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsle.mask.nxv1i16.i16( %0, @@ -968,7 +968,7 @@ define @intrinsic_vmsle_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsle.mask.nxv2i16.i16( %0, @@ -1008,7 +1008,7 @@ define @intrinsic_vmsle_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsle.mask.nxv4i16.i16( %0, @@ -1048,7 +1048,7 @@ define @intrinsic_vmsle_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsle.mask.nxv8i16.i16( %0, @@ -1088,7 +1088,7 @@ define @intrinsic_vmsle_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsle.mask.nxv16i16.i16( %0, @@ -1128,7 +1128,7 @@ define @intrinsic_vmsle_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmsle.vx 
{{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsle.mask.nxv1i32.i32( %0, @@ -1168,7 +1168,7 @@ define @intrinsic_vmsle_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsle.mask.nxv2i32.i32( %0, @@ -1208,7 +1208,7 @@ define @intrinsic_vmsle_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsle.mask.nxv4i32.i32( %0, @@ -1248,7 +1248,7 @@ define @intrinsic_vmsle_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsle.mask.nxv8i32.i32( %0, @@ -1276,7 +1276,7 @@ define @intrinsic_vmsle_mask_vi_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsle.mask.nxv1i8.i8( %0, @@ -1304,7 +1304,7 @@ define @intrinsic_vmsle_mask_vi_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsle.mask.nxv2i8.i8( %0, @@ -1332,7 +1332,7 @@ define @intrinsic_vmsle_mask_vi_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsle.mask.nxv4i8.i8( %0, @@ -1360,7 +1360,7 @@ define @intrinsic_vmsle_mask_vi_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsle.mask.nxv8i8.i8( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vmsle_mask_vi_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsle.mask.nxv16i8.i8( %0, @@ -1416,7 +1416,7 @@ define @intrinsic_vmsle_mask_vi_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsle.mask.nxv32i8.i8( %0, @@ -1444,7 +1444,7 @@ define @intrinsic_vmsle_mask_vi_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsle.mask.nxv1i16.i16( %0, @@ -1472,7 +1472,7 @@ define @intrinsic_vmsle_mask_vi_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsle.mask.nxv2i16.i16( %0, @@ -1500,7 +1500,7 @@ define @intrinsic_vmsle_mask_vi_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsle.mask.nxv4i16.i16( %0, @@ -1528,7 +1528,7 @@ define @intrinsic_vmsle_mask_vi_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsle.mask.nxv8i16.i16( %0, @@ -1556,7 +1556,7 @@ define @intrinsic_vmsle_mask_vi_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsle.mask.nxv16i16.i16( %0, @@ -1584,7 +1584,7 @@ define @intrinsic_vmsle_mask_vi_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsle.mask.nxv1i32.i32( %0, @@ -1612,7 +1612,7 @@ define @intrinsic_vmsle_mask_vi_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsle.mask.nxv2i32.i32( %0, @@ -1640,7 +1640,7 @@ define @intrinsic_vmsle_mask_vi_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsle.mask.nxv4i32.i32( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vmsle_mask_vi_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsle.mask.nxv8i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: 
vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsle.nxv1i8( %1, @@ -72,7 +72,7 @@ define @intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsle.nxv2i8( %1, @@ -116,7 +116,7 @@ define @intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsle.nxv4i8( %1, @@ -160,7 +160,7 @@ define @intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsle.nxv8i8( %1, @@ -204,7 +204,7 @@ define @intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsle.nxv16i8( %1, @@ -248,7 +248,7 @@ define @intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsle.nxv32i8( %1, @@ -292,7 +292,7 @@ define @intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsle.nxv1i16( %1, @@ -336,7 +336,7 @@ define @intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsle.nxv2i16( %1, @@ -380,7 +380,7 @@ define @intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsle.nxv4i16( %1, @@ -424,7 +424,7 @@ define @intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsle.nxv8i16( %1, @@ -468,7 +468,7 @@ define @intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 
%4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsle.nxv16i16( %1, @@ -512,7 +512,7 @@ define @intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsle.nxv1i32( %1, @@ -556,7 +556,7 @@ define @intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsle.nxv2i32( %1, @@ -600,7 +600,7 @@ define @intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsle.nxv4i32( %1, @@ -644,7 +644,7 @@ define @intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsle.nxv8i32( %1, @@ -688,7 +688,7 @@ define @intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsle.nxv1i64( %1, @@ -732,7 +732,7 @@ define @intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsle.nxv2i64( %1, @@ -776,7 +776,7 @@ define @intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsle.nxv4i64( %1, @@ -820,7 +820,7 @@ define @intrinsic_vmsle_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsle.mask.nxv1i8.i8( %0, @@ -860,7 +860,7 @@ define @intrinsic_vmsle_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e8,mf4,tu,mu ; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsle.mask.nxv2i8.i8( %0, @@ -900,7 +900,7 @@ define @intrinsic_vmsle_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsle.mask.nxv4i8.i8( %0, @@ -940,7 +940,7 @@ define @intrinsic_vmsle_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsle.mask.nxv8i8.i8( %0, @@ -980,7 +980,7 @@ define @intrinsic_vmsle_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsle.mask.nxv16i8.i8( %0, @@ -1020,7 +1020,7 @@ define @intrinsic_vmsle_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsle.mask.nxv32i8.i8( %0, @@ -1060,7 +1060,7 @@ define @intrinsic_vmsle_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsle.mask.nxv1i16.i16( %0, @@ -1100,7 +1100,7 @@ define @intrinsic_vmsle_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsle.mask.nxv2i16.i16( %0, @@ -1140,7 +1140,7 @@ define @intrinsic_vmsle_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsle.mask.nxv4i16.i16( %0, @@ -1180,7 +1180,7 @@ define @intrinsic_vmsle_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsle.mask.nxv8i16.i16( %0, @@ -1220,7 +1220,7 @@ define @intrinsic_vmsle_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsle.mask.nxv16i16.i16( %0, @@ -1260,7 +1260,7 @@ define 
@intrinsic_vmsle_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsle.mask.nxv1i32.i32( %0, @@ -1300,7 +1300,7 @@ define @intrinsic_vmsle_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsle.mask.nxv2i32.i32( %0, @@ -1340,7 +1340,7 @@ define @intrinsic_vmsle_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsle.mask.nxv4i32.i32( %0, @@ -1380,7 +1380,7 @@ define @intrinsic_vmsle_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsle.mask.nxv8i32.i32( %0, @@ -1420,7 +1420,7 @@ define @intrinsic_vmsle_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsle.mask.nxv1i64.i64( %0, @@ -1460,7 +1460,7 @@ define @intrinsic_vmsle_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsle.mask.nxv2i64.i64( %0, @@ -1500,7 +1500,7 @@ define @intrinsic_vmsle_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsle.mask.nxv4i64.i64( %0, @@ -1528,7 +1528,7 @@ define @intrinsic_vmsle_mask_vi_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsle.mask.nxv1i8.i8( %0, @@ -1556,7 +1556,7 @@ define @intrinsic_vmsle_mask_vi_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsle.mask.nxv2i8.i8( %0, @@ -1584,7 +1584,7 @@ define @intrinsic_vmsle_mask_vi_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsle.mask.nxv4i8.i8( %0, @@ -1612,7 +1612,7 @@ define @intrinsic_vmsle_mask_vi_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsle.mask.nxv8i8.i8( %0, @@ -1640,7 +1640,7 @@ define @intrinsic_vmsle_mask_vi_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsle.mask.nxv16i8.i8( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vmsle_mask_vi_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsle.mask.nxv32i8.i8( %0, @@ -1696,7 +1696,7 @@ define @intrinsic_vmsle_mask_vi_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsle.mask.nxv1i16.i16( %0, @@ -1724,7 +1724,7 @@ define @intrinsic_vmsle_mask_vi_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsle.mask.nxv2i16.i16( %0, @@ -1752,7 +1752,7 @@ define @intrinsic_vmsle_mask_vi_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsle.mask.nxv4i16.i16( %0, @@ -1780,7 +1780,7 @@ define @intrinsic_vmsle_mask_vi_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsle.mask.nxv8i16.i16( %0, @@ -1808,7 +1808,7 @@ define @intrinsic_vmsle_mask_vi_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsle.mask.nxv16i16.i16( %0, @@ -1836,7 +1836,7 @@ define @intrinsic_vmsle_mask_vi_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsle.mask.nxv1i32.i32( %0, @@ -1864,7 +1864,7 @@ define @intrinsic_vmsle_mask_vi_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i32_i32 
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsle.mask.nxv2i32.i32( %0, @@ -1892,7 +1892,7 @@ define @intrinsic_vmsle_mask_vi_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsle.mask.nxv4i32.i32( %0, @@ -1920,7 +1920,7 @@ define @intrinsic_vmsle_mask_vi_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsle.mask.nxv8i32.i32( %0, @@ -1948,7 +1948,7 @@ define @intrinsic_vmsle_mask_vi_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsle.mask.nxv1i64.i64( %0, @@ -1976,7 +1976,7 @@ define @intrinsic_vmsle_mask_vi_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsle.mask.nxv2i64.i64( %0, @@ -2004,7 +2004,7 @@ define @intrinsic_vmsle_mask_vi_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsle.mask.nxv4i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsleu.nxv1i8( %1, @@ -72,7 +72,7 @@ define @intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsleu.nxv2i8( %1, @@ -116,7 +116,7 @@ define @intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsleu.nxv4i8( %1, @@ -160,7 +160,7 @@ define @intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
[... the same ta,mu -> tu,mu change in the expected vsetvli of the remaining intrinsic_vmsleu_mask_vv, _vx and _vi tests of vmsleu-rv32.ll ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll
@@ -28,7 +28,7 @@
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
[... the same change in the remaining intrinsic_vmsleu_mask_vv, _vx and _vi tests of vmsleu-rv64.ll, including the e64,m1-m4 cases ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll
@@ -28,7 +28,7 @@
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
[... the same change in the remaining intrinsic_vmslt_mask_vv and _vx tests of vmslt-rv32.ll ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll
@@ -28,7 +28,7 @@
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
[... the same change in the remaining intrinsic_vmslt_mask_vv and _vx tests of vmslt-rv64.ll, including the e64,m1-m4 cases ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll
@@ -28,7 +28,7 @@
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
[... the same change in the remaining intrinsic_vmsltu_mask_vv tests and in the intrinsic_vmsltu_mask_vx tests up to nxv8i16 ...]
@@ -1088,7 +1088,7 @@ define @intrinsic_vmsltu_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i16_i16 -;
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsltu.mask.nxv16i16.i16( %0, @@ -1128,7 +1128,7 @@ define @intrinsic_vmsltu_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsltu.mask.nxv1i32.i32( %0, @@ -1168,7 +1168,7 @@ define @intrinsic_vmsltu_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsltu.mask.nxv2i32.i32( %0, @@ -1208,7 +1208,7 @@ define @intrinsic_vmsltu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsltu.mask.nxv4i32.i32( %0, @@ -1248,7 +1248,7 @@ define @intrinsic_vmsltu_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsltu.mask.nxv8i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsltu.nxv1i8( %1, @@ -72,7 +72,7 @@ define @intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsltu.nxv2i8( %1, @@ -116,7 +116,7 @@ define @intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsltu.nxv4i8( %1, @@ -160,7 +160,7 @@ define @intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsltu.nxv8i8( %1, @@ -204,7 +204,7 @@ define @intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { 
entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsltu.nxv16i8( %1, @@ -248,7 +248,7 @@ define @intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsltu.nxv32i8( %1, @@ -292,7 +292,7 @@ define @intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsltu.nxv1i16( %1, @@ -336,7 +336,7 @@ define @intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsltu.nxv2i16( %1, @@ -380,7 +380,7 @@ define @intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsltu.nxv4i16( %1, @@ -424,7 +424,7 @@ define @intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsltu.nxv8i16( %1, @@ -468,7 +468,7 @@ define @intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsltu.nxv16i16( %1, @@ -512,7 +512,7 @@ define @intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsltu.nxv1i32( %1, @@ -556,7 +556,7 @@ define @intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsltu.nxv2i32( %1, @@ -600,7 +600,7 @@ define @intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsltu.nxv4i32( %1, @@ -644,7 +644,7 @@ define @intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsltu.nxv8i32( %1, @@ -688,7 +688,7 @@ define @intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsltu.nxv1i64( %1, @@ -732,7 +732,7 @@ define @intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsltu.nxv2i64( %1, @@ -776,7 +776,7 @@ define @intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsltu.nxv4i64( %1, @@ -820,7 +820,7 @@ define @intrinsic_vmsltu_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsltu.mask.nxv1i8.i8( %0, @@ -860,7 +860,7 @@ define @intrinsic_vmsltu_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsltu.mask.nxv2i8.i8( %0, @@ -900,7 +900,7 @@ define @intrinsic_vmsltu_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsltu.mask.nxv4i8.i8( %0, @@ -940,7 +940,7 @@ define @intrinsic_vmsltu_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsltu.mask.nxv8i8.i8( %0, @@ -980,7 +980,7 @@ define @intrinsic_vmsltu_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsltu.mask.nxv16i8.i8( %0, @@ -1020,7 
+1020,7 @@ define @intrinsic_vmsltu_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsltu.mask.nxv32i8.i8( %0, @@ -1060,7 +1060,7 @@ define @intrinsic_vmsltu_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsltu.mask.nxv1i16.i16( %0, @@ -1100,7 +1100,7 @@ define @intrinsic_vmsltu_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsltu.mask.nxv2i16.i16( %0, @@ -1140,7 +1140,7 @@ define @intrinsic_vmsltu_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsltu.mask.nxv4i16.i16( %0, @@ -1180,7 +1180,7 @@ define @intrinsic_vmsltu_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsltu.mask.nxv8i16.i16( %0, @@ -1220,7 +1220,7 @@ define @intrinsic_vmsltu_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsltu.mask.nxv16i16.i16( %0, @@ -1260,7 +1260,7 @@ define @intrinsic_vmsltu_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsltu.mask.nxv1i32.i32( %0, @@ -1300,7 +1300,7 @@ define @intrinsic_vmsltu_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsltu.mask.nxv2i32.i32( %0, @@ -1340,7 +1340,7 @@ define @intrinsic_vmsltu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsltu.mask.nxv4i32.i32( %0, @@ -1380,7 +1380,7 @@ define @intrinsic_vmsltu_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { 
entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsltu.mask.nxv8i32.i32( %0, @@ -1420,7 +1420,7 @@ define @intrinsic_vmsltu_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsltu.mask.nxv1i64.i64( %0, @@ -1460,7 +1460,7 @@ define @intrinsic_vmsltu_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsltu.mask.nxv2i64.i64( %0, @@ -1500,7 +1500,7 @@ define @intrinsic_vmsltu_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsltu.mask.nxv4i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsne.nxv1i8( %1, @@ -72,7 +72,7 @@ define @intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsne.nxv2i8( %1, @@ -116,7 +116,7 @@ define @intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsne.nxv4i8( %1, @@ -160,7 +160,7 @@ define @intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsne.nxv8i8( %1, @@ -204,7 +204,7 @@ define @intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsne.nxv16i8( %1, @@ -248,7 +248,7 @@ define @intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8( %0, %1, 
%2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsne.nxv32i8( %1, @@ -292,7 +292,7 @@ define @intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsne.nxv1i16( %1, @@ -336,7 +336,7 @@ define @intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsne.nxv2i16( %1, @@ -380,7 +380,7 @@ define @intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsne.nxv4i16( %1, @@ -424,7 +424,7 @@ define @intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsne.nxv8i16( %1, @@ -468,7 +468,7 @@ define @intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsne.nxv16i16( %1, @@ -512,7 +512,7 @@ define @intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsne.nxv1i32( %1, @@ -556,7 +556,7 @@ define @intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsne.nxv2i32( %1, @@ -600,7 +600,7 @@ define @intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsne.nxv4i32( %1, @@ -644,7 +644,7 @@ define @intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsne.nxv8i32( %1, @@ -688,7 +688,7 @@ define @intrinsic_vmsne_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsne.mask.nxv1i8.i8( %0, @@ -728,7 +728,7 @@ define @intrinsic_vmsne_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsne.mask.nxv2i8.i8( %0, @@ -768,7 +768,7 @@ define @intrinsic_vmsne_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsne.mask.nxv4i8.i8( %0, @@ -808,7 +808,7 @@ define @intrinsic_vmsne_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsne.mask.nxv8i8.i8( %0, @@ -848,7 +848,7 @@ define @intrinsic_vmsne_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsne.mask.nxv16i8.i8( %0, @@ -888,7 +888,7 @@ define @intrinsic_vmsne_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsne.mask.nxv32i8.i8( %0, @@ -928,7 +928,7 @@ define @intrinsic_vmsne_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsne.mask.nxv1i16.i16( %0, @@ -968,7 +968,7 @@ define @intrinsic_vmsne_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsne.mask.nxv2i16.i16( %0, @@ -1008,7 +1008,7 @@ define @intrinsic_vmsne_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsne.mask.nxv4i16.i16( %0, @@ -1048,7 +1048,7 @@ define 
@intrinsic_vmsne_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsne.mask.nxv8i16.i16( %0, @@ -1088,7 +1088,7 @@ define @intrinsic_vmsne_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsne.mask.nxv16i16.i16( %0, @@ -1128,7 +1128,7 @@ define @intrinsic_vmsne_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsne.mask.nxv1i32.i32( %0, @@ -1168,7 +1168,7 @@ define @intrinsic_vmsne_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsne.mask.nxv2i32.i32( %0, @@ -1208,7 +1208,7 @@ define @intrinsic_vmsne_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsne.mask.nxv4i32.i32( %0, @@ -1248,7 +1248,7 @@ define @intrinsic_vmsne_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsne.mask.nxv8i32.i32( %0, @@ -1276,7 +1276,7 @@ define @intrinsic_vmsne_mask_vi_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsne.mask.nxv1i8.i8( %0, @@ -1304,7 +1304,7 @@ define @intrinsic_vmsne_mask_vi_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsne.mask.nxv2i8.i8( %0, @@ -1332,7 +1332,7 @@ define @intrinsic_vmsne_mask_vi_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsne.mask.nxv4i8.i8( %0, @@ -1360,7 +1360,7 @@ define @intrinsic_vmsne_mask_vi_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsne.mask.nxv8i8.i8( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vmsne_mask_vi_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsne.mask.nxv16i8.i8( %0, @@ -1416,7 +1416,7 @@ define @intrinsic_vmsne_mask_vi_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsne.mask.nxv32i8.i8( %0, @@ -1444,7 +1444,7 @@ define @intrinsic_vmsne_mask_vi_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsne.mask.nxv1i16.i16( %0, @@ -1472,7 +1472,7 @@ define @intrinsic_vmsne_mask_vi_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsne.mask.nxv2i16.i16( %0, @@ -1500,7 +1500,7 @@ define @intrinsic_vmsne_mask_vi_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsne.mask.nxv4i16.i16( %0, @@ -1528,7 +1528,7 @@ define @intrinsic_vmsne_mask_vi_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsne.mask.nxv8i16.i16( %0, @@ -1556,7 +1556,7 @@ define @intrinsic_vmsne_mask_vi_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsne.mask.nxv16i16.i16( %0, @@ -1584,7 +1584,7 @@ define @intrinsic_vmsne_mask_vi_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsne.mask.nxv1i32.i32( %0, @@ -1612,7 +1612,7 @@ define @intrinsic_vmsne_mask_vi_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsne.mask.nxv2i32.i32( %0, @@ -1640,7 +1640,7 @@ define @intrinsic_vmsne_mask_vi_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i32_i32 -; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsne.mask.nxv4i32.i32( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vmsne_mask_vi_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsne.mask.nxv8i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsne.nxv1i8( %1, @@ -72,7 +72,7 @@ define @intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsne.nxv2i8( %1, @@ -116,7 +116,7 @@ define @intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsne.nxv4i8( %1, @@ -160,7 +160,7 @@ define @intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsne.nxv8i8( %1, @@ -204,7 +204,7 @@ define @intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsne.nxv16i8( %1, @@ -248,7 +248,7 @@ define @intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsne.nxv32i8( %1, @@ -292,7 +292,7 @@ define @intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsne.nxv1i16( %1, @@ -336,7 +336,7 @@ define @intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsne.nxv2i16( %1, @@ -380,7 +380,7 @@ define @intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsne.nxv4i16( %1, @@ -424,7 +424,7 @@ define @intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsne.nxv8i16( %1, @@ -468,7 +468,7 @@ define @intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsne.nxv16i16( %1, @@ -512,7 +512,7 @@ define @intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsne.nxv1i32( %1, @@ -556,7 +556,7 @@ define @intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsne.nxv2i32( %1, @@ -600,7 +600,7 @@ define @intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsne.nxv4i32( %1, @@ -644,7 +644,7 @@ define @intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsne.nxv8i32( %1, @@ -688,7 +688,7 @@ define @intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsne.nxv1i64( %1, @@ -732,7 +732,7 @@ define @intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call 
@llvm.riscv.vmsne.nxv2i64( %1, @@ -776,7 +776,7 @@ define @intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %mask = call @llvm.riscv.vmsne.nxv4i64( %1, @@ -820,7 +820,7 @@ define @intrinsic_vmsne_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsne.mask.nxv1i8.i8( %0, @@ -860,7 +860,7 @@ define @intrinsic_vmsne_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsne.mask.nxv2i8.i8( %0, @@ -900,7 +900,7 @@ define @intrinsic_vmsne_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsne.mask.nxv4i8.i8( %0, @@ -940,7 +940,7 @@ define @intrinsic_vmsne_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsne.mask.nxv8i8.i8( %0, @@ -980,7 +980,7 @@ define @intrinsic_vmsne_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsne.mask.nxv16i8.i8( %0, @@ -1020,7 +1020,7 @@ define @intrinsic_vmsne_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsne.mask.nxv32i8.i8( %0, @@ -1060,7 +1060,7 @@ define @intrinsic_vmsne_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsne.mask.nxv1i16.i16( %0, @@ -1100,7 +1100,7 @@ define @intrinsic_vmsne_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsne.mask.nxv2i16.i16( %0, @@ -1140,7 +1140,7 @@ define @intrinsic_vmsne_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vmsne_mask_vx_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsne.mask.nxv4i16.i16( %0, @@ -1180,7 +1180,7 @@ define @intrinsic_vmsne_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsne.mask.nxv8i16.i16( %0, @@ -1220,7 +1220,7 @@ define @intrinsic_vmsne_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsne.mask.nxv16i16.i16( %0, @@ -1260,7 +1260,7 @@ define @intrinsic_vmsne_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsne.mask.nxv1i32.i32( %0, @@ -1300,7 +1300,7 @@ define @intrinsic_vmsne_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsne.mask.nxv2i32.i32( %0, @@ -1340,7 +1340,7 @@ define @intrinsic_vmsne_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsne.mask.nxv4i32.i32( %0, @@ -1380,7 +1380,7 @@ define @intrinsic_vmsne_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsne.mask.nxv8i32.i32( %0, @@ -1420,7 +1420,7 @@ define @intrinsic_vmsne_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsne.mask.nxv1i64.i64( %0, @@ -1460,7 +1460,7 @@ define @intrinsic_vmsne_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsne.mask.nxv2i64.i64( %0, @@ -1500,7 +1500,7 @@ define @intrinsic_vmsne_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e64,m4,tu,mu ; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmsne.mask.nxv4i64.i64( %0, @@ -1528,7 +1528,7 @@ define @intrinsic_vmsne_mask_vi_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsne.mask.nxv1i8.i8( %0, @@ -1556,7 +1556,7 @@ define @intrinsic_vmsne_mask_vi_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsne.mask.nxv2i8.i8( %0, @@ -1584,7 +1584,7 @@ define @intrinsic_vmsne_mask_vi_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsne.mask.nxv4i8.i8( %0, @@ -1612,7 +1612,7 @@ define @intrinsic_vmsne_mask_vi_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsne.mask.nxv8i8.i8( %0, @@ -1640,7 +1640,7 @@ define @intrinsic_vmsne_mask_vi_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsne.mask.nxv16i8.i8( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vmsne_mask_vi_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsne.mask.nxv32i8.i8( %0, @@ -1696,7 +1696,7 @@ define @intrinsic_vmsne_mask_vi_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsne.mask.nxv1i16.i16( %0, @@ -1724,7 +1724,7 @@ define @intrinsic_vmsne_mask_vi_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsne.mask.nxv2i16.i16( %0, @@ -1752,7 +1752,7 @@ define @intrinsic_vmsne_mask_vi_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsne.mask.nxv4i16.i16( %0, @@ -1780,7 +1780,7 @@ define @intrinsic_vmsne_mask_vi_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsne.mask.nxv8i16.i16( %0, @@ -1808,7 +1808,7 @@ define @intrinsic_vmsne_mask_vi_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsne.mask.nxv16i16.i16( %0, @@ -1836,7 +1836,7 @@ define @intrinsic_vmsne_mask_vi_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsne.mask.nxv1i32.i32( %0, @@ -1864,7 +1864,7 @@ define @intrinsic_vmsne_mask_vi_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsne.mask.nxv2i32.i32( %0, @@ -1892,7 +1892,7 @@ define @intrinsic_vmsne_mask_vi_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsne.mask.nxv4i32.i32( %0, @@ -1920,7 +1920,7 @@ define @intrinsic_vmsne_mask_vi_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsne.mask.nxv8i32.i32( %0, @@ -1948,7 +1948,7 @@ define @intrinsic_vmsne_mask_vi_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsne.mask.nxv1i64.i64( %0, @@ -1976,7 +1976,7 @@ define @intrinsic_vmsne_mask_vi_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsne.mask.nxv2i64.i64( %0, @@ -2004,7 +2004,7 @@ define @intrinsic_vmsne_mask_vi_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vmsne.mask.nxv4i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll @@ -29,7 +29,7 @@ ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv1i1_nxv1i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmsof.m v25, v16, v0.t ; 
CHECK-NEXT: vmv1r.v v0, v25
@@ -71,7 +71,7 @@
 ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv2i1_nxv2i1:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT: vmv1r.v v0, v17
 ; CHECK-NEXT: vmsof.m v25, v16, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -113,7 +113,7 @@
 ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv4i1_nxv4i1:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT: vmv1r.v v0, v17
 ; CHECK-NEXT: vmsof.m v25, v16, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -155,7 +155,7 @@
 ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv8i1_nxv8i1:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT: vmv1r.v v0, v17
 ; CHECK-NEXT: vmsof.m v25, v16, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -197,7 +197,7 @@
 ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv16i1_nxv16i1:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT: vmv1r.v v0, v17
 ; CHECK-NEXT: vmsof.m v25, v16, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -239,7 +239,7 @@
 ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv32i1_nxv32i1:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT: vmv1r.v v0, v17
 ; CHECK-NEXT: vmsof.m v25, v16, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -281,7 +281,7 @@
 ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv64i1_nxv64i1:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu
+; CHECK-NEXT: vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT: vmv1r.v v0, v17
 ; CHECK-NEXT: vmsof.m v25, v16, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll
@@ -29,7 +29,7 @@
 ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv1i1_nxv1i1:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT: vmv1r.v v0, v17
 ; CHECK-NEXT: vmsof.m v25, v16, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -71,7 +71,7 @@
 ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv2i1_nxv2i1:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT: vmv1r.v v0, v17
 ; CHECK-NEXT: vmsof.m v25, v16, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -113,7 +113,7 @@
 ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv4i1_nxv4i1:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT: vmv1r.v v0, v17
 ; CHECK-NEXT: vmsof.m v25, v16, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -155,7 +155,7 @@
 ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv8i1_nxv8i1:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT: vmv1r.v v0, v17
 ; CHECK-NEXT: vmsof.m v25, v16, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v25
@@ -197,7 +197,7 @@
 ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv16i1_nxv16i1:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:
vsetvli a0, a0, e8,m2,tu,mu ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmsof.m v25, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -239,7 +239,7 @@ ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv32i1_nxv32i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmsof.m v25, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 @@ -281,7 +281,7 @@ ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv64i1_nxv64i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v25, v0 -; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m8,tu,mu ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmsof.m v25, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vmul_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vmul_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vmul_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vmul_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vmul_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vmul_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vmul_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vmul_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vmul_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vmul_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vmul_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vmul_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vmul_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vmul_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vmul_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vmul_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv1i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vmul_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv2i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vmul_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv4i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vmul_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv8i8.i8( %0, @@ -908,7 +908,7 @@ define @intrinsic_vmul_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv16i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vmul_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv32i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vmul_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e8,m8,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv64i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vmul_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv1i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vmul_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv2i16.i16( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vmul_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv4i16.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vmul_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv8i16.i16( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vmul_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv16i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vmul_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv32i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vmul_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv1i32.i32( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vmul_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv2i32.i32( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vmul_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu 
; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv4i32.i32( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vmul_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv8i32.i32( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vmul_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv16i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vmul_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vmul_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vmul_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vmul_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vmul_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vmul_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { entry: 
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vmul_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vmul_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vmul_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vmul_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vmul_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vmul_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vmul_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define 
@intrinsic_vmul_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vmul_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vmul_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64( %0, @@ -788,7 +788,7 @@ define @intrinsic_vmul_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64( %0, @@ -828,7 +828,7 @@ define @intrinsic_vmul_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64( %0, @@ -868,7 +868,7 @@ define @intrinsic_vmul_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64( %0, @@ -908,7 +908,7 @@ define @intrinsic_vmul_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv1i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vmul_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv2i8.i8( %0, @@ -988,7 +988,7 @@ 
define @intrinsic_vmul_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv4i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vmul_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv8i8.i8( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vmul_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv16i8.i8( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vmul_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv32i8.i8( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vmul_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv64i8.i8( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vmul_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv1i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vmul_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv2i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vmul_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv4i16.i16( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vmul_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv8i16.i16( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vmul_mask_vx_nxv16i16_nxv16i16_i16( %0, 
%1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv16i16.i16( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vmul_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv32i16.i16( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vmul_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv1i32.i32( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vmul_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv2i32.i32( %0, @@ -1508,7 +1508,7 @@ define @intrinsic_vmul_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv4i32.i32( %0, @@ -1548,7 +1548,7 @@ define @intrinsic_vmul_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv8i32.i32( %0, @@ -1588,7 +1588,7 @@ define @intrinsic_vmul_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv16i32.i32( %0, @@ -1628,7 +1628,7 @@ define @intrinsic_vmul_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv1i64.i64( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vmul_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv2i64.i64( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vmul_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, 
%3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv4i64.i64( %0, @@ -1748,7 +1748,7 @@ define @intrinsic_vmul_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmul.mask.nxv8i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vmulh_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vmulh_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vmulh_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vmulh_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vmulh_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vmulh_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: 
vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vmulh_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vmulh_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vmulh_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vmulh_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vmulh_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vmulh_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vmulh_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vmulh_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vmulh_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vmulh_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv1i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vmulh_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv2i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vmulh_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv4i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vmulh_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv8i8.i8( %0, @@ -908,7 +908,7 @@ define @intrinsic_vmulh_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv16i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vmulh_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv32i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vmulh_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv64i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vmulh_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv1i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vmulh_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv2i16.i16( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vmulh_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv4i16.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vmulh_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv8i16.i16( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vmulh_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv16i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vmulh_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv32i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vmulh_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv1i32.i32( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vmulh_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv2i32.i32( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vmulh_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv4i32.i32( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vmulh_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv8i32.i32( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vmulh_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv16i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vmulh_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vmulh_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vmulh_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vmulh_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vmulh_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vmulh_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8( %0, @@ 
-268,7 +268,7 @@ define @intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vmulh_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vmulh_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vmulh_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vmulh_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vmulh_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vmulh_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vmulh_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, 
{{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vmulh_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vmulh_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vmulh_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64( %0, @@ -788,7 +788,7 @@ define @intrinsic_vmulh_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64( %0, @@ -828,7 +828,7 @@ define @intrinsic_vmulh_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64( %0, @@ -868,7 +868,7 @@ define @intrinsic_vmulh_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64( %0, @@ -908,7 +908,7 @@ define @intrinsic_vmulh_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv1i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vmulh_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e8,mf4,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv2i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vmulh_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv4i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vmulh_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv8i8.i8( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vmulh_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv16i8.i8( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vmulh_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv32i8.i8( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vmulh_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv64i8.i8( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vmulh_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv1i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vmulh_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv2i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vmulh_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv4i16.i16( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vmulh_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: 
vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv8i16.i16( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vmulh_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv16i16.i16( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vmulh_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv32i16.i16( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vmulh_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv1i32.i32( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vmulh_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv2i32.i32( %0, @@ -1508,7 +1508,7 @@ define @intrinsic_vmulh_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv4i32.i32( %0, @@ -1548,7 +1548,7 @@ define @intrinsic_vmulh_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv8i32.i32( %0, @@ -1588,7 +1588,7 @@ define @intrinsic_vmulh_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv16i32.i32( %0, @@ -1628,7 +1628,7 @@ define @intrinsic_vmulh_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv1i64.i64( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vmulh_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv2i64.i64( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vmulh_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv4i64.i64( %0, @@ -1748,7 +1748,7 @@ define @intrinsic_vmulh_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulh.mask.nxv8i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8( %0, @@ 
-268,7 +268,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv1i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv2i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv4i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv8i8.i8( %0, @@ -908,7 +908,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv16i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv32i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv64i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv1i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv2i16.i16( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv4i16.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv8i16.i16( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv16i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv32i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { 
entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv2i32.i32( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv4i32.i32( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv8i32.i32( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv16i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, 
%3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64( %0, @@ -788,7 +788,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64( %0, @@ -828,7 +828,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64( %0, @@ -868,7 +868,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t 
%a = call @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64( %0, @@ -908,7 +908,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv1i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv2i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv4i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv8i8.i8( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv16i8.i8( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv32i8.i8( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv64i8.i8( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv1i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, 
{{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv2i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv4i16.i16( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv8i16.i16( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv16i16.i16( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv32i16.i16( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv2i32.i32( %0, @@ -1508,7 +1508,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv4i32.i32( %0, @@ -1548,7 +1548,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv8i32.i32( %0, @@ -1588,7 +1588,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv16i32.i32( %0, @@ -1628,7 +1628,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv1i64.i64( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv2i64.i64( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv4i64.i64( %0, @@ -1748,7 +1748,7 @@ define @intrinsic_vmulhsu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhsu.mask.nxv8i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vmulhu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vmulhu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vmulhu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vmulhu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call 
@llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vmulhu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vmulhu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vmulhu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vmulhu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vmulhu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vmulhu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vmulhu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vmulhu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vmulhu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vmulhu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vmulhu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vmulhu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv1i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vmulhu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv2i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vmulhu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv4i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vmulhu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv8i8.i8( %0, @@ -908,7 +908,7 @@ define @intrinsic_vmulhu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv16i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vmulhu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv32i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vmulhu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv64i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vmulhu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv1i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vmulhu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv2i16.i16( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vmulhu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv4i16.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vmulhu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv8i16.i16( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vmulhu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv16i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vmulhu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vmulhu_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv32i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vmulhu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv1i32.i32( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vmulhu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv2i32.i32( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vmulhu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv4i32.i32( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vmulhu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv8i32.i32( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vmulhu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv16i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vmulhu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vmulhu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vmulhu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: 
vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vmulhu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vmulhu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vmulhu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vmulhu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vmulhu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vmulhu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vmulhu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vmulhu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vmulhu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vmulhu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vmulhu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vmulhu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vmulhu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64( %0, @@ -788,7 +788,7 @@ define @intrinsic_vmulhu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64( %0, @@ -828,7 +828,7 @@ define @intrinsic_vmulhu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, 
%1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64( %0, @@ -868,7 +868,7 @@ define @intrinsic_vmulhu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64( %0, @@ -908,7 +908,7 @@ define @intrinsic_vmulhu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv1i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vmulhu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv2i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vmulhu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv4i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vmulhu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv8i8.i8( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vmulhu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv16i8.i8( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vmulhu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv32i8.i8( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vmulhu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv64i8.i8( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vmulhu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, 
i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv1i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vmulhu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv2i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vmulhu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv4i16.i16( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vmulhu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv8i16.i16( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vmulhu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv16i16.i16( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vmulhu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv32i16.i16( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vmulhu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv1i32.i32( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vmulhu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv2i32.i32( %0, @@ -1508,7 +1508,7 @@ define @intrinsic_vmulhu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv4i32.i32( %0, @@ -1548,7 +1548,7 @@ 
define @intrinsic_vmulhu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv8i32.i32( %0, @@ -1588,7 +1588,7 @@ define @intrinsic_vmulhu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv16i32.i32( %0, @@ -1628,7 +1628,7 @@ define @intrinsic_vmulhu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv1i64.i64( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vmulhu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv2i64.i64( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vmulhu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv4i64.i64( %0, @@ -1748,7 +1748,7 @@ define @intrinsic_vmulhu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vmulhu.mask.nxv8i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vnclip_mask_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i8_nxv2i16_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vnclip_mask_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vnclip_mask_wv_nxv4i8_nxv4i16_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vnclip_mask_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i8_nxv8i16_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vnclip_mask_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv16i8_nxv16i16_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vnclip_mask_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv32i8_nxv32i16_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vnclip_mask_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i16_nxv1i32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vnclip_mask_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i16_nxv2i32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vnclip_mask_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i16_nxv4i32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vnclip_mask_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i16_nxv8i32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vnclip_mask_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv16i16_nxv16i32_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call 
@llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vnclip_mask_wx_nxv1i8_nxv1i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv1i8_nxv1i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i8( %0, @@ -508,7 +508,7 @@ define @intrinsic_vnclip_mask_wx_nxv2i8_nxv2i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv2i8_nxv2i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i8( %0, @@ -548,7 +548,7 @@ define @intrinsic_vnclip_mask_wx_nxv4i8_nxv4i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv4i8_nxv4i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i8( %0, @@ -588,7 +588,7 @@ define @intrinsic_vnclip_mask_wx_nxv8i8_nxv8i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv8i8_nxv8i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i8( %0, @@ -628,7 +628,7 @@ define @intrinsic_vnclip_mask_wx_nxv16i8_nxv16i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv16i8_nxv16i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i8( %0, @@ -668,7 +668,7 @@ define @intrinsic_vnclip_mask_wx_nxv32i8_nxv32i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv32i8_nxv32i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i8( %0, @@ -708,7 +708,7 @@ define @intrinsic_vnclip_mask_wx_nxv1i16_nxv1i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv1i16_nxv1i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i16( %0, @@ -748,7 +748,7 @@ define @intrinsic_vnclip_mask_wx_nxv2i16_nxv2i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv2i16_nxv2i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i16( %0, @@ -788,7 +788,7 @@ define @intrinsic_vnclip_mask_wx_nxv4i16_nxv4i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv4i16_nxv4i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e16,m1,tu,mu ; CHECK: vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i16( %0, @@ -828,7 +828,7 @@ define @intrinsic_vnclip_mask_wx_nxv8i16_nxv8i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv8i16_nxv8i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i16( %0, @@ -868,7 +868,7 @@ define @intrinsic_vnclip_mask_wx_nxv16i16_nxv16i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv16i16_nxv16i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i16( %0, @@ -896,7 +896,7 @@ define @intrinsic_vnclip_mask_wi_nxv1i8_nxv1i16_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv1i8_nxv1i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i8( %0, @@ -924,7 +924,7 @@ define @intrinsic_vnclip_mask_wi_nxv2i8_nxv2i16_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv2i8_nxv2i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i8( %0, @@ -952,7 +952,7 @@ define @intrinsic_vnclip_mask_wi_nxv4i8_nxv4i16_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv4i8_nxv4i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i8( %0, @@ -980,7 +980,7 @@ define @intrinsic_vnclip_mask_wi_nxv8i8_nxv8i16_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv8i8_nxv8i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i8( %0, @@ -1008,7 +1008,7 @@ define @intrinsic_vnclip_mask_wi_nxv16i8_nxv16i16_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv16i8_nxv16i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i8( %0, @@ -1036,7 +1036,7 @@ define @intrinsic_vnclip_mask_wi_nxv32i8_nxv32i16_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv32i8_nxv32i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i8( %0, @@ -1064,7 +1064,7 @@ define @intrinsic_vnclip_mask_wi_nxv1i16_nxv1i32_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv1i16_nxv1i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnclip.wi 
{{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i16( %0, @@ -1092,7 +1092,7 @@ define @intrinsic_vnclip_mask_wi_nxv2i16_nxv2i32_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv2i16_nxv2i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i16( %0, @@ -1120,7 +1120,7 @@ define @intrinsic_vnclip_mask_wi_nxv4i16_nxv4i32_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv4i16_nxv4i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vnclip_mask_wi_nxv8i16_nxv8i32_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv8i16_nxv8i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i16( %0, @@ -1176,7 +1176,7 @@ define @intrinsic_vnclip_mask_wi_nxv16i16_nxv16i32_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv16i16_nxv16i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i16( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vnclip_mask_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i8_nxv2i16_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vnclip_mask_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i8_nxv4i16_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vnclip_mask_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i8_nxv8i16_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8( %0, @@ -188,7 +188,7 @@ define 
@intrinsic_vnclip_mask_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv16i8_nxv16i16_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vnclip_mask_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv32i8_nxv32i16_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vnclip_mask_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i16_nxv1i32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vnclip_mask_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i16_nxv2i32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vnclip_mask_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i16_nxv4i32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vnclip_mask_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i16_nxv8i32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vnclip_mask_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv16i16_nxv16i32_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vnclip_mask_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i32_nxv1i64_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32( %0, @@ -508,7 +508,7 @@ define @intrinsic_vnclip_mask_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i32_nxv2i64_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32( %0, @@ -548,7 +548,7 @@ define @intrinsic_vnclip_mask_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i32_nxv4i64_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vnclip_mask_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i32_nxv8i64_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vnclip_mask_wx_nxv1i8_nxv1i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv1i8_nxv1i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i8( %0, @@ -668,7 +668,7 @@ define @intrinsic_vnclip_mask_wx_nxv2i8_nxv2i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv2i8_nxv2i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i8( %0, @@ -708,7 +708,7 @@ define @intrinsic_vnclip_mask_wx_nxv4i8_nxv4i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv4i8_nxv4i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i8( %0, @@ -748,7 +748,7 @@ define @intrinsic_vnclip_mask_wx_nxv8i8_nxv8i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv8i8_nxv8i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vnclip_mask_wx_nxv16i8_nxv16i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv16i8_nxv16i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vnclip_mask_wx_nxv32i8_nxv32i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv32i8_nxv32i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vnclip_mask_wx_nxv1i16_nxv1i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vnclip_mask_wx_nxv1i16_nxv1i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i16( %0, @@ -908,7 +908,7 @@ define @intrinsic_vnclip_mask_wx_nxv2i16_nxv2i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv2i16_nxv2i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i16( %0, @@ -948,7 +948,7 @@ define @intrinsic_vnclip_mask_wx_nxv4i16_nxv4i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv4i16_nxv4i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i16( %0, @@ -988,7 +988,7 @@ define @intrinsic_vnclip_mask_wx_nxv8i16_nxv8i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv8i16_nxv8i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i16( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vnclip_mask_wx_nxv16i16_nxv16i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv16i16_nxv16i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vnclip_mask_wx_nxv1i32_nxv1i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv1i32_nxv1i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i32( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vnclip_mask_wx_nxv2i32_nxv2i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv2i32_nxv2i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i32( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vnclip_mask_wx_nxv4i32_nxv4i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv4i32_nxv4i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i32( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vnclip_mask_wx_nxv8i32_nxv8i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv8i32_nxv8i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i32( %0, @@ -1216,7 
+1216,7 @@ define @intrinsic_vnclip_mask_wi_nxv1i8_nxv1i16_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv1i8_nxv1i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i8( %0, @@ -1244,7 +1244,7 @@ define @intrinsic_vnclip_mask_wi_nxv2i8_nxv2i16_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv2i8_nxv2i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i8( %0, @@ -1272,7 +1272,7 @@ define @intrinsic_vnclip_mask_wi_nxv4i8_nxv4i16_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv4i8_nxv4i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i8( %0, @@ -1300,7 +1300,7 @@ define @intrinsic_vnclip_mask_wi_nxv8i8_nxv8i16_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv8i8_nxv8i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i8( %0, @@ -1328,7 +1328,7 @@ define @intrinsic_vnclip_mask_wi_nxv16i8_nxv16i16_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv16i8_nxv16i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i8( %0, @@ -1356,7 +1356,7 @@ define @intrinsic_vnclip_mask_wi_nxv32i8_nxv32i16_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv32i8_nxv32i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i8( %0, @@ -1384,7 +1384,7 @@ define @intrinsic_vnclip_mask_wi_nxv1i16_nxv1i32_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv1i16_nxv1i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i16( %0, @@ -1412,7 +1412,7 @@ define @intrinsic_vnclip_mask_wi_nxv2i16_nxv2i32_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv2i16_nxv2i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i16( %0, @@ -1440,7 +1440,7 @@ define @intrinsic_vnclip_mask_wi_nxv4i16_nxv4i32_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv4i16_nxv4i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i16( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vnclip_mask_wi_nxv8i16_nxv8i32_i16( %0, 
%1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv8i16_nxv8i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i16( %0, @@ -1496,7 +1496,7 @@ define @intrinsic_vnclip_mask_wi_nxv16i16_nxv16i32_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv16i16_nxv16i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i16( %0, @@ -1524,7 +1524,7 @@ define @intrinsic_vnclip_mask_wi_nxv1i32_nxv1i64_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv1i32_nxv1i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i32( %0, @@ -1552,7 +1552,7 @@ define @intrinsic_vnclip_mask_wi_nxv2i32_nxv2i64_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv2i32_nxv2i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i32( %0, @@ -1580,7 +1580,7 @@ define @intrinsic_vnclip_mask_wi_nxv4i32_nxv4i64_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv4i32_nxv4i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i32( %0, @@ -1608,7 +1608,7 @@ define @intrinsic_vnclip_mask_wi_nxv8i32_nxv8i64_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv8i32_nxv8i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vnclipu_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i8_nxv1i16_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vnclipu_mask_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i8_nxv2i16_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vnclipu_mask_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i8_nxv4i16_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vnclipu_mask_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i8_nxv8i16_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vnclipu_mask_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv16i8_nxv16i16_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vnclipu_mask_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv32i8_nxv32i16_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vnclipu_mask_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i16_nxv1i32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vnclipu_mask_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i16_nxv2i32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vnclipu_mask_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i16_nxv4i32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vnclipu_mask_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i16_nxv8i32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vnclipu_mask_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv16i16_nxv16i32_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16( %0, @@ -468,7 +468,7 @@ define 
@intrinsic_vnclipu_mask_wx_nxv1i8_nxv1i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv1i8_nxv1i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i8( %0, @@ -508,7 +508,7 @@ define @intrinsic_vnclipu_mask_wx_nxv2i8_nxv2i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv2i8_nxv2i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i8( %0, @@ -548,7 +548,7 @@ define @intrinsic_vnclipu_mask_wx_nxv4i8_nxv4i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv4i8_nxv4i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i8( %0, @@ -588,7 +588,7 @@ define @intrinsic_vnclipu_mask_wx_nxv8i8_nxv8i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv8i8_nxv8i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i8( %0, @@ -628,7 +628,7 @@ define @intrinsic_vnclipu_mask_wx_nxv16i8_nxv16i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv16i8_nxv16i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i8( %0, @@ -668,7 +668,7 @@ define @intrinsic_vnclipu_mask_wx_nxv32i8_nxv32i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv32i8_nxv32i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i8( %0, @@ -708,7 +708,7 @@ define @intrinsic_vnclipu_mask_wx_nxv1i16_nxv1i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv1i16_nxv1i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i16( %0, @@ -748,7 +748,7 @@ define @intrinsic_vnclipu_mask_wx_nxv2i16_nxv2i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv2i16_nxv2i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i16( %0, @@ -788,7 +788,7 @@ define @intrinsic_vnclipu_mask_wx_nxv4i16_nxv4i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv4i16_nxv4i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, 
{{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i16( %0, @@ -828,7 +828,7 @@ define @intrinsic_vnclipu_mask_wx_nxv8i16_nxv8i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv8i16_nxv8i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i16( %0, @@ -868,7 +868,7 @@ define @intrinsic_vnclipu_mask_wx_nxv16i16_nxv16i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv16i16_nxv16i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i16( %0, @@ -896,7 +896,7 @@ define @intrinsic_vnclipu_mask_wi_nxv1i8_nxv1i16_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv1i8_nxv1i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i8( %0, @@ -924,7 +924,7 @@ define @intrinsic_vnclipu_mask_wi_nxv2i8_nxv2i16_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv2i8_nxv2i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i8( %0, @@ -952,7 +952,7 @@ define @intrinsic_vnclipu_mask_wi_nxv4i8_nxv4i16_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv4i8_nxv4i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i8( %0, @@ -980,7 +980,7 @@ define @intrinsic_vnclipu_mask_wi_nxv8i8_nxv8i16_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv8i8_nxv8i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i8( %0, @@ -1008,7 +1008,7 @@ define @intrinsic_vnclipu_mask_wi_nxv16i8_nxv16i16_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv16i8_nxv16i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i8( %0, @@ -1036,7 +1036,7 @@ define @intrinsic_vnclipu_mask_wi_nxv32i8_nxv32i16_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv32i8_nxv32i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i8( %0, @@ -1064,7 +1064,7 @@ define @intrinsic_vnclipu_mask_wi_nxv1i16_nxv1i32_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv1i16_nxv1i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnclipu.wi {{v[0-9]+}}, 
{{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i16( %0, @@ -1092,7 +1092,7 @@ define @intrinsic_vnclipu_mask_wi_nxv2i16_nxv2i32_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv2i16_nxv2i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i16( %0, @@ -1120,7 +1120,7 @@ define @intrinsic_vnclipu_mask_wi_nxv4i16_nxv4i32_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv4i16_nxv4i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vnclipu_mask_wi_nxv8i16_nxv8i32_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv8i16_nxv8i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i16( %0, @@ -1176,7 +1176,7 @@ define @intrinsic_vnclipu_mask_wi_nxv16i16_nxv16i32_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv16i16_nxv16i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i16( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vnclipu_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i8_nxv1i16_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vnclipu_mask_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i8_nxv2i16_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vnclipu_mask_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i8_nxv4i16_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vnclipu_mask_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i8_nxv8i16_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8( %0, @@ -188,7 +188,7 @@ define 
@intrinsic_vnclipu_mask_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv16i8_nxv16i16_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vnclipu_mask_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv32i8_nxv32i16_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vnclipu_mask_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i16_nxv1i32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vnclipu_mask_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i16_nxv2i32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vnclipu_mask_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i16_nxv4i32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vnclipu_mask_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i16_nxv8i32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vnclipu_mask_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv16i16_nxv16i32_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vnclipu_mask_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i32_nxv1i64_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32( %0, @@ -508,7 +508,7 @@ define @intrinsic_vnclipu_mask_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i32_nxv2i64_nxv2i32 -; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32( %0, @@ -548,7 +548,7 @@ define @intrinsic_vnclipu_mask_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i32_nxv4i64_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vnclipu_mask_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i32_nxv8i64_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vnclipu_mask_wx_nxv1i8_nxv1i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv1i8_nxv1i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i8( %0, @@ -668,7 +668,7 @@ define @intrinsic_vnclipu_mask_wx_nxv2i8_nxv2i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv2i8_nxv2i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i8( %0, @@ -708,7 +708,7 @@ define @intrinsic_vnclipu_mask_wx_nxv4i8_nxv4i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv4i8_nxv4i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i8( %0, @@ -748,7 +748,7 @@ define @intrinsic_vnclipu_mask_wx_nxv8i8_nxv8i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv8i8_nxv8i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vnclipu_mask_wx_nxv16i8_nxv16i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv16i8_nxv16i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vnclipu_mask_wx_nxv32i8_nxv32i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv32i8_nxv32i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vnclipu_mask_wx_nxv1i16_nxv1i32_i16( %0, %1, i16 
%2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv1i16_nxv1i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i16( %0, @@ -908,7 +908,7 @@ define @intrinsic_vnclipu_mask_wx_nxv2i16_nxv2i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv2i16_nxv2i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i16( %0, @@ -948,7 +948,7 @@ define @intrinsic_vnclipu_mask_wx_nxv4i16_nxv4i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv4i16_nxv4i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i16( %0, @@ -988,7 +988,7 @@ define @intrinsic_vnclipu_mask_wx_nxv8i16_nxv8i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv8i16_nxv8i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i16( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vnclipu_mask_wx_nxv16i16_nxv16i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv16i16_nxv16i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vnclipu_mask_wx_nxv1i32_nxv1i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv1i32_nxv1i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i32( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vnclipu_mask_wx_nxv2i32_nxv2i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv2i32_nxv2i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i32( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vnclipu_mask_wx_nxv4i32_nxv4i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv4i32_nxv4i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i32( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vnclipu_mask_wx_nxv8i32_nxv8i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv8i32_nxv8i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, 
{{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i32( %0, @@ -1216,7 +1216,7 @@ define @intrinsic_vnclipu_mask_wi_nxv1i8_nxv1i16_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv1i8_nxv1i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i8( %0, @@ -1244,7 +1244,7 @@ define @intrinsic_vnclipu_mask_wi_nxv2i8_nxv2i16_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv2i8_nxv2i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i8( %0, @@ -1272,7 +1272,7 @@ define @intrinsic_vnclipu_mask_wi_nxv4i8_nxv4i16_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv4i8_nxv4i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i8( %0, @@ -1300,7 +1300,7 @@ define @intrinsic_vnclipu_mask_wi_nxv8i8_nxv8i16_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv8i8_nxv8i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i8( %0, @@ -1328,7 +1328,7 @@ define @intrinsic_vnclipu_mask_wi_nxv16i8_nxv16i16_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv16i8_nxv16i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i8( %0, @@ -1356,7 +1356,7 @@ define @intrinsic_vnclipu_mask_wi_nxv32i8_nxv32i16_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv32i8_nxv32i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i8( %0, @@ -1384,7 +1384,7 @@ define @intrinsic_vnclipu_mask_wi_nxv1i16_nxv1i32_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv1i16_nxv1i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i16( %0, @@ -1412,7 +1412,7 @@ define @intrinsic_vnclipu_mask_wi_nxv2i16_nxv2i32_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv2i16_nxv2i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i16( %0, @@ -1440,7 +1440,7 @@ define @intrinsic_vnclipu_mask_wi_nxv4i16_nxv4i32_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv4i16_nxv4i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call 
@llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i16( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vnclipu_mask_wi_nxv8i16_nxv8i32_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv8i16_nxv8i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i16( %0, @@ -1496,7 +1496,7 @@ define @intrinsic_vnclipu_mask_wi_nxv16i16_nxv16i32_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv16i16_nxv16i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i16( %0, @@ -1524,7 +1524,7 @@ define @intrinsic_vnclipu_mask_wi_nxv1i32_nxv1i64_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv1i32_nxv1i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i32( %0, @@ -1552,7 +1552,7 @@ define @intrinsic_vnclipu_mask_wi_nxv2i32_nxv2i64_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv2i32_nxv2i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i32( %0, @@ -1580,7 +1580,7 @@ define @intrinsic_vnclipu_mask_wi_nxv4i32_nxv4i64_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv4i32_nxv4i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i32( %0, @@ -1608,7 +1608,7 @@ define @intrinsic_vnclipu_mask_wi_nxv8i32_nxv8i64_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv8i32_nxv8i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll @@ -9,7 +9,7 @@ define @intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8( %0, @@ -30,7 +30,7 @@ define @intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8( %0, @@ -51,7 +51,7 @@ define @intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8 -; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8( %0, @@ -72,7 +72,7 @@ define @intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8( %0, @@ -93,7 +93,7 @@ define @intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8( %0, @@ -114,7 +114,7 @@ define @intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8( %0, @@ -135,7 +135,7 @@ define @intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8( %0, @@ -156,7 +156,7 @@ define @intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8( %0, @@ -177,7 +177,7 @@ define @intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8( %0, @@ -198,7 +198,7 @@ define @intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8( %0, @@ -219,7 +219,7 @@ define @intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8( %0, @@ -240,7 +240,7 @@ define @intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8( %0, @@ -261,7 +261,7 @@ define @intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16( %0, @@ -282,7 +282,7 @@ define @intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16( %0, @@ -303,7 +303,7 @@ define @intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16( %0, @@ -324,7 +324,7 @@ define @intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16( %0, @@ -345,7 +345,7 @@ define @intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16( %0, @@ -366,7 +366,7 @@ define @intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16( %0, @@ -387,7 +387,7 @@ define @intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16( %0, @@ -408,7 +408,7 @@ define @intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16( %0, @@ -429,7 +429,7 @@ define @intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16( %0, @@ -450,7 +450,7 @@ define @intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16( %0, @@ -471,7 +471,7 @@ define @intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32( %0, @@ -492,7 +492,7 @@ define @intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32( %0, @@ -513,7 +513,7 @@ define @intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32( %0, @@ -534,7 +534,7 @@ define @intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32( %0, @@ -555,7 +555,7 @@ define @intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32( %0, @@ -576,7 +576,7 @@ define @intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32( %0, @@ -597,7 +597,7 @@ define @intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32( %0, @@ -618,7 +618,7 @@ define @intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32( %0, @@ -639,7 +639,7 @@ define @intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv1i8.i8( %0, @@ -660,7 +660,7 @@ define @intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv1i8.i8( %0, @@ -681,7 +681,7 @@ define @intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv2i8.i8( %0, @@ -702,7 +702,7 @@ define @intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv2i8.i8( %0, @@ -723,7 +723,7 @@ define @intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv4i8.i8( %0, @@ -744,7 +744,7 @@ define @intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv4i8.i8( %0, @@ -765,7 +765,7 @@ define @intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv8i8.i8( %0, @@ -786,7 +786,7 @@ define @intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv8i8.i8( %0, @@ -807,7 +807,7 @@ define @intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv16i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, i32 %4) 
nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv16i8.i8( %0, @@ -849,7 +849,7 @@ define @intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv32i8.i8( %0, @@ -870,7 +870,7 @@ define @intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv32i8.i8( %0, @@ -891,7 +891,7 @@ define @intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv1i16.i16( %0, @@ -912,7 +912,7 @@ define @intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv1i16.i16( %0, @@ -933,7 +933,7 @@ define @intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv2i16.i16( %0, @@ -954,7 +954,7 @@ define @intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv2i16.i16( %0, @@ -975,7 +975,7 @@ define @intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv4i16.i16( %0, @@ -996,7 +996,7 @@ define @intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv4i16.i16( %0, @@ -1017,7 +1017,7 @@ define @intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e16,m2,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv8i16.i16( %0, @@ -1038,7 +1038,7 @@ define @intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv8i16.i16( %0, @@ -1059,7 +1059,7 @@ define @intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv16i16.i16( %0, @@ -1080,7 +1080,7 @@ define @intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv16i16.i16( %0, @@ -1101,7 +1101,7 @@ define @intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv1i32.i32( %0, @@ -1122,7 +1122,7 @@ define @intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv1i32.i32( %0, @@ -1143,7 +1143,7 @@ define @intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv2i32.i32( %0, @@ -1164,7 +1164,7 @@ define @intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv2i32.i32( %0, @@ -1185,7 +1185,7 @@ define @intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv4i32.i32( %0, @@ -1206,7 +1206,7 @@ define @intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv4i32.i32( %0, @@ -1227,7 
+1227,7 @@ define @intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv8i32.i32( %0, @@ -1248,7 +1248,7 @@ define @intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv8i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll @@ -9,7 +9,7 @@ define @intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8( %0, @@ -30,7 +30,7 @@ define @intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8( %0, @@ -51,7 +51,7 @@ define @intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8( %0, @@ -72,7 +72,7 @@ define @intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8( %0, @@ -93,7 +93,7 @@ define @intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8( %0, @@ -114,7 +114,7 @@ define @intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8( %0, @@ -135,7 +135,7 @@ define @intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, 
{{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8( %0, @@ -156,7 +156,7 @@ define @intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8( %0, @@ -177,7 +177,7 @@ define @intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8( %0, @@ -198,7 +198,7 @@ define @intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8( %0, @@ -219,7 +219,7 @@ define @intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8( %0, @@ -240,7 +240,7 @@ define @intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8( %0, @@ -261,7 +261,7 @@ define @intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16( %0, @@ -282,7 +282,7 @@ define @intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16( %0, @@ -303,7 +303,7 @@ define @intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16( %0, @@ -324,7 +324,7 @@ define @intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, 
{{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16( %0, @@ -345,7 +345,7 @@ define @intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16( %0, @@ -366,7 +366,7 @@ define @intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16( %0, @@ -387,7 +387,7 @@ define @intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16( %0, @@ -408,7 +408,7 @@ define @intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16( %0, @@ -429,7 +429,7 @@ define @intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16( %0, @@ -450,7 +450,7 @@ define @intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16( %0, @@ -471,7 +471,7 @@ define @intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32( %0, @@ -492,7 +492,7 @@ define @intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32( %0, @@ -513,7 +513,7 @@ define @intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, 
{{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32( %0, @@ -534,7 +534,7 @@ define @intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32( %0, @@ -555,7 +555,7 @@ define @intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32( %0, @@ -576,7 +576,7 @@ define @intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32( %0, @@ -597,7 +597,7 @@ define @intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32( %0, @@ -618,7 +618,7 @@ define @intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32( %0, @@ -639,7 +639,7 @@ define @intrinsic_vnmsac_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64( %0, @@ -660,7 +660,7 @@ define @intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64( %0, @@ -681,7 +681,7 @@ define @intrinsic_vnmsac_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64( %0, @@ -702,7 +702,7 @@ define @intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, 
{{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64( %0, @@ -723,7 +723,7 @@ define @intrinsic_vnmsac_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64( %0, @@ -744,7 +744,7 @@ define @intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64( %0, @@ -765,7 +765,7 @@ define @intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv1i8.i8( %0, @@ -786,7 +786,7 @@ define @intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv1i8.i8( %0, @@ -807,7 +807,7 @@ define @intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv2i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv2i8.i8( %0, @@ -849,7 +849,7 @@ define @intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv4i8.i8( %0, @@ -870,7 +870,7 @@ define @intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv4i8.i8( %0, @@ -891,7 +891,7 @@ define @intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv8i8.i8( %0, @@ -912,7 +912,7 @@ define @intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, i64 %4) nounwind { entry: ; 
CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv8i8.i8( %0, @@ -933,7 +933,7 @@ define @intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv16i8.i8( %0, @@ -954,7 +954,7 @@ define @intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv16i8.i8( %0, @@ -975,7 +975,7 @@ define @intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv32i8.i8( %0, @@ -996,7 +996,7 @@ define @intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv32i8.i8( %0, @@ -1017,7 +1017,7 @@ define @intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv1i16.i16( %0, @@ -1038,7 +1038,7 @@ define @intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv1i16.i16( %0, @@ -1059,7 +1059,7 @@ define @intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv2i16.i16( %0, @@ -1080,7 +1080,7 @@ define @intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv2i16.i16( %0, @@ -1101,7 +1101,7 @@ define @intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnmsac.vx 
{{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv4i16.i16( %0, @@ -1122,7 +1122,7 @@ define @intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv4i16.i16( %0, @@ -1143,7 +1143,7 @@ define @intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv8i16.i16( %0, @@ -1164,7 +1164,7 @@ define @intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv8i16.i16( %0, @@ -1185,7 +1185,7 @@ define @intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv16i16.i16( %0, @@ -1206,7 +1206,7 @@ define @intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv16i16.i16( %0, @@ -1227,7 +1227,7 @@ define @intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv1i32.i32( %0, @@ -1248,7 +1248,7 @@ define @intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv1i32.i32( %0, @@ -1269,7 +1269,7 @@ define @intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv2i32.i32( %0, @@ -1290,7 +1290,7 @@ define @intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv2i32.i32( %0, @@ -1311,7 +1311,7 @@ define 
@intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv4i32.i32( %0, @@ -1332,7 +1332,7 @@ define @intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv4i32.i32( %0, @@ -1353,7 +1353,7 @@ define @intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv8i32.i32( %0, @@ -1374,7 +1374,7 @@ define @intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv8i32.i32( %0, @@ -1395,7 +1395,7 @@ define @intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv1i64.i64( %0, @@ -1416,7 +1416,7 @@ define @intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv1i64.i64( %0, @@ -1437,7 +1437,7 @@ define @intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv2i64.i64( %0, @@ -1458,7 +1458,7 @@ define @intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv2i64.i64( %0, @@ -1479,7 +1479,7 @@ define @intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsac.nxv4i64.i64( %0, @@ -1500,7 +1500,7 @@ define @intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64 -; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsac.mask.nxv4i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll @@ -9,7 +9,7 @@ define @intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8( %0, @@ -30,7 +30,7 @@ define @intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8( %0, @@ -51,7 +51,7 @@ define @intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8( %0, @@ -72,7 +72,7 @@ define @intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8( %0, @@ -93,7 +93,7 @@ define @intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8( %0, @@ -114,7 +114,7 @@ define @intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8( %0, @@ -135,7 +135,7 @@ define @intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8( %0, @@ -156,7 +156,7 @@ define @intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8( %0, @@ -177,7 +177,7 @@ define @intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, 
%2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8( %0, @@ -198,7 +198,7 @@ define @intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8( %0, @@ -219,7 +219,7 @@ define @intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8( %0, @@ -240,7 +240,7 @@ define @intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8( %0, @@ -261,7 +261,7 @@ define @intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16( %0, @@ -282,7 +282,7 @@ define @intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16( %0, @@ -303,7 +303,7 @@ define @intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16( %0, @@ -324,7 +324,7 @@ define @intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16( %0, @@ -345,7 +345,7 @@ define @intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16( %0, @@ -366,7 +366,7 @@ define @intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) 
nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16( %0, @@ -387,7 +387,7 @@ define @intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16( %0, @@ -408,7 +408,7 @@ define @intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16( %0, @@ -429,7 +429,7 @@ define @intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16( %0, @@ -450,7 +450,7 @@ define @intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16( %0, @@ -471,7 +471,7 @@ define @intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32( %0, @@ -492,7 +492,7 @@ define @intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32( %0, @@ -513,7 +513,7 @@ define @intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32( %0, @@ -534,7 +534,7 @@ define @intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32( %0, @@ -555,7 +555,7 @@ define @intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, 
%2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32( %0, @@ -576,7 +576,7 @@ define @intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32( %0, @@ -597,7 +597,7 @@ define @intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32( %0, @@ -618,7 +618,7 @@ define @intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32( %0, @@ -639,7 +639,7 @@ define @intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv1i8.i8( %0, @@ -660,7 +660,7 @@ define @intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv1i8.i8( %0, @@ -681,7 +681,7 @@ define @intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv2i8.i8( %0, @@ -702,7 +702,7 @@ define @intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv2i8.i8( %0, @@ -723,7 +723,7 @@ define @intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv4i8.i8( %0, @@ -744,7 +744,7 @@ define @intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv4i8.i8( %0, @@ -765,7 +765,7 @@ define @intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv8i8.i8( %0, @@ -786,7 +786,7 @@ define @intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv8i8.i8( %0, @@ -807,7 +807,7 @@ define @intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv16i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv16i8.i8( %0, @@ -849,7 +849,7 @@ define @intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv32i8.i8( %0, @@ -870,7 +870,7 @@ define @intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv32i8.i8( %0, @@ -891,7 +891,7 @@ define @intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv1i16.i16( %0, @@ -912,7 +912,7 @@ define @intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv1i16.i16( %0, @@ -933,7 +933,7 @@ define @intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv2i16.i16( %0, @@ -954,7 +954,7 @@ define @intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16( 
%0, i16 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv2i16.i16( %0, @@ -975,7 +975,7 @@ define @intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv4i16.i16( %0, @@ -996,7 +996,7 @@ define @intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv4i16.i16( %0, @@ -1017,7 +1017,7 @@ define @intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv8i16.i16( %0, @@ -1038,7 +1038,7 @@ define @intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv8i16.i16( %0, @@ -1059,7 +1059,7 @@ define @intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv16i16.i16( %0, @@ -1080,7 +1080,7 @@ define @intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv16i16.i16( %0, @@ -1101,7 +1101,7 @@ define @intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv1i32.i32( %0, @@ -1122,7 +1122,7 @@ define @intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv1i32.i32( %0, @@ -1143,7 +1143,7 @@ define @intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32 -; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv2i32.i32( %0, @@ -1164,7 +1164,7 @@ define @intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv2i32.i32( %0, @@ -1185,7 +1185,7 @@ define @intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv4i32.i32( %0, @@ -1206,7 +1206,7 @@ define @intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv4i32.i32( %0, @@ -1227,7 +1227,7 @@ define @intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv8i32.i32( %0, @@ -1248,7 +1248,7 @@ define @intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv8i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll @@ -9,7 +9,7 @@ define @intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8( %0, @@ -30,7 +30,7 @@ define @intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8( %0, @@ -51,7 +51,7 @@ define @intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8( %0, @@ -72,7 +72,7 @@ define @intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8( %0, @@ -93,7 +93,7 @@ define @intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8( %0, @@ -114,7 +114,7 @@ define @intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8( %0, @@ -135,7 +135,7 @@ define @intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8( %0, @@ -156,7 +156,7 @@ define @intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8( %0, @@ -177,7 +177,7 @@ define @intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8( %0, @@ -198,7 +198,7 @@ define @intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8( %0, @@ -219,7 +219,7 @@ define @intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8( %0, @@ -240,7 +240,7 @@ define @intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8( %0, @@ -261,7 +261,7 @@ define @intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16( %0, @@ -282,7 +282,7 @@ define @intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16( %0, @@ -303,7 +303,7 @@ define @intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16( %0, @@ -324,7 +324,7 @@ define @intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16( %0, @@ -345,7 +345,7 @@ define @intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16( %0, @@ -366,7 +366,7 @@ define @intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16( %0, @@ -387,7 +387,7 @@ define @intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16( %0, @@ -408,7 +408,7 @@ define @intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16( %0, @@ -429,7 +429,7 @@ define @intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16( %0, @@ -450,7 +450,7 @@ define @intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16 
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16( %0, @@ -471,7 +471,7 @@ define @intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32( %0, @@ -492,7 +492,7 @@ define @intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32( %0, @@ -513,7 +513,7 @@ define @intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32( %0, @@ -534,7 +534,7 @@ define @intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32( %0, @@ -555,7 +555,7 @@ define @intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32( %0, @@ -576,7 +576,7 @@ define @intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32( %0, @@ -597,7 +597,7 @@ define @intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32( %0, @@ -618,7 +618,7 @@ define @intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32( %0, @@ -639,7 +639,7 @@ define @intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64 -; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64( %0, @@ -660,7 +660,7 @@ define @intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64( %0, @@ -681,7 +681,7 @@ define @intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64( %0, @@ -702,7 +702,7 @@ define @intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64( %0, @@ -723,7 +723,7 @@ define @intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64( %0, @@ -744,7 +744,7 @@ define @intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64( %0, @@ -765,7 +765,7 @@ define @intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv1i8.i8( %0, @@ -786,7 +786,7 @@ define @intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv1i8.i8( %0, @@ -807,7 +807,7 @@ define @intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv2i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu 
; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv2i8.i8( %0, @@ -849,7 +849,7 @@ define @intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv4i8.i8( %0, @@ -870,7 +870,7 @@ define @intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv4i8.i8( %0, @@ -891,7 +891,7 @@ define @intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv8i8.i8( %0, @@ -912,7 +912,7 @@ define @intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv8i8.i8( %0, @@ -933,7 +933,7 @@ define @intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv16i8.i8( %0, @@ -954,7 +954,7 @@ define @intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv16i8.i8( %0, @@ -975,7 +975,7 @@ define @intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv32i8.i8( %0, @@ -996,7 +996,7 @@ define @intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv32i8.i8( %0, @@ -1017,7 +1017,7 @@ define @intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv1i16.i16( %0, @@ -1038,7 +1038,7 @@ define @intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, i64 %4) nounwind { entry: ; 
CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv1i16.i16( %0, @@ -1059,7 +1059,7 @@ define @intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv2i16.i16( %0, @@ -1080,7 +1080,7 @@ define @intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv2i16.i16( %0, @@ -1101,7 +1101,7 @@ define @intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv4i16.i16( %0, @@ -1122,7 +1122,7 @@ define @intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv4i16.i16( %0, @@ -1143,7 +1143,7 @@ define @intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv8i16.i16( %0, @@ -1164,7 +1164,7 @@ define @intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv8i16.i16( %0, @@ -1185,7 +1185,7 @@ define @intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv16i16.i16( %0, @@ -1206,7 +1206,7 @@ define @intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv16i16.i16( %0, @@ -1227,7 +1227,7 @@ define @intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv1i32.i32( %0, @@ -1248,7 +1248,7 @@ define @intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv1i32.i32( %0, @@ -1269,7 +1269,7 @@ define @intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv2i32.i32( %0, @@ -1290,7 +1290,7 @@ define @intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv2i32.i32( %0, @@ -1311,7 +1311,7 @@ define @intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv4i32.i32( %0, @@ -1332,7 +1332,7 @@ define @intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv4i32.i32( %0, @@ -1353,7 +1353,7 @@ define @intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv8i32.i32( %0, @@ -1374,7 +1374,7 @@ define @intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv8i32.i32( %0, @@ -1395,7 +1395,7 @@ define @intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv1i64.i64( %0, @@ -1416,7 +1416,7 @@ define @intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv1i64.i64( %0, @@ 
-1437,7 +1437,7 @@ define @intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv2i64.i64( %0, @@ -1458,7 +1458,7 @@ define @intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv2i64.i64( %0, @@ -1479,7 +1479,7 @@ define @intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} %a = call @llvm.riscv.vnmsub.nxv4i64.i64( %0, @@ -1500,7 +1500,7 @@ define @intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnmsub.mask.nxv4i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vnsra_mask_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv2i8_nxv2i16_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vnsra_mask_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv4i8_nxv4i16_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vnsra_mask_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv8i8_nxv8i16_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vnsra_mask_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv16i8_nxv16i16_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vnsra_mask_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv32i8_nxv32i16_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vnsra_mask_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i16_nxv1i32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vnsra_mask_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv2i16_nxv2i32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vnsra_mask_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv4i16_nxv4i32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vnsra_mask_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv8i16_nxv8i32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vnsra_mask_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv16i16_nxv16i32_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vnsra_mask_wx_nxv1i8_nxv1i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv1i8_nxv1i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i8( %0, @@ -508,7 +508,7 @@ define @intrinsic_vnsra_mask_wx_nxv2i8_nxv2i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv2i8_nxv2i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i8( %0, @@ -548,7 +548,7 @@ define @intrinsic_vnsra_mask_wx_nxv4i8_nxv4i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; 
CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv4i8_nxv4i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i8( %0, @@ -588,7 +588,7 @@ define @intrinsic_vnsra_mask_wx_nxv8i8_nxv8i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv8i8_nxv8i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i8( %0, @@ -628,7 +628,7 @@ define @intrinsic_vnsra_mask_wx_nxv16i8_nxv16i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv16i8_nxv16i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i8( %0, @@ -668,7 +668,7 @@ define @intrinsic_vnsra_mask_wx_nxv32i8_nxv32i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv32i8_nxv32i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i8( %0, @@ -708,7 +708,7 @@ define @intrinsic_vnsra_mask_wx_nxv1i16_nxv1i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv1i16_nxv1i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i16( %0, @@ -748,7 +748,7 @@ define @intrinsic_vnsra_mask_wx_nxv2i16_nxv2i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv2i16_nxv2i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i16( %0, @@ -788,7 +788,7 @@ define @intrinsic_vnsra_mask_wx_nxv4i16_nxv4i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv4i16_nxv4i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i16( %0, @@ -828,7 +828,7 @@ define @intrinsic_vnsra_mask_wx_nxv8i16_nxv8i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv8i16_nxv8i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i16( %0, @@ -868,7 +868,7 @@ define @intrinsic_vnsra_mask_wx_nxv16i16_nxv16i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv16i16_nxv16i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i16( %0, @@ -896,7 +896,7 @@ define 
@intrinsic_vnsra_mask_wi_nxv1i8_nxv1i16_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv1i8_nxv1i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i8( %0, @@ -924,7 +924,7 @@ define @intrinsic_vnsra_mask_wi_nxv2i8_nxv2i16_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv2i8_nxv2i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i8( %0, @@ -952,7 +952,7 @@ define @intrinsic_vnsra_mask_wi_nxv4i8_nxv4i16_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv4i8_nxv4i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i8( %0, @@ -980,7 +980,7 @@ define @intrinsic_vnsra_mask_wi_nxv8i8_nxv8i16_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv8i8_nxv8i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i8( %0, @@ -1008,7 +1008,7 @@ define @intrinsic_vnsra_mask_wi_nxv16i8_nxv16i16_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv16i8_nxv16i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i8( %0, @@ -1036,7 +1036,7 @@ define @intrinsic_vnsra_mask_wi_nxv32i8_nxv32i16_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv32i8_nxv32i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i8( %0, @@ -1064,7 +1064,7 @@ define @intrinsic_vnsra_mask_wi_nxv1i16_nxv1i32_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv1i16_nxv1i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i16( %0, @@ -1092,7 +1092,7 @@ define @intrinsic_vnsra_mask_wi_nxv2i16_nxv2i32_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv2i16_nxv2i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i16( %0, @@ -1120,7 +1120,7 @@ define @intrinsic_vnsra_mask_wi_nxv4i16_nxv4i32_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv4i16_nxv4i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vnsra_mask_wi_nxv8i16_nxv8i32_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vnsra_mask_wi_nxv8i16_nxv8i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i16( %0, @@ -1176,7 +1176,7 @@ define @intrinsic_vnsra_mask_wi_nxv16i16_nxv16i32_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv16i16_nxv16i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i16( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vnsra_mask_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv2i8_nxv2i16_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vnsra_mask_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv4i8_nxv4i16_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vnsra_mask_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv8i8_nxv8i16_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vnsra_mask_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv16i8_nxv16i16_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vnsra_mask_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv32i8_nxv32i16_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vnsra_mask_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i16_nxv1i32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e16,mf4,tu,mu ; CHECK: vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vnsra_mask_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv2i16_nxv2i32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vnsra_mask_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv4i16_nxv4i32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vnsra_mask_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv8i16_nxv8i32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vnsra_mask_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv16i16_nxv16i32_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vnsra_mask_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i32_nxv1i64_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32( %0, @@ -508,7 +508,7 @@ define @intrinsic_vnsra_mask_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv2i32_nxv2i64_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32( %0, @@ -548,7 +548,7 @@ define @intrinsic_vnsra_mask_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv4i32_nxv4i64_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vnsra_mask_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv8i32_nxv8i64_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vnsra_mask_wx_nxv1i8_nxv1i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; 
CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv1i8_nxv1i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i8( %0, @@ -668,7 +668,7 @@ define @intrinsic_vnsra_mask_wx_nxv2i8_nxv2i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv2i8_nxv2i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i8( %0, @@ -708,7 +708,7 @@ define @intrinsic_vnsra_mask_wx_nxv4i8_nxv4i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv4i8_nxv4i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i8( %0, @@ -748,7 +748,7 @@ define @intrinsic_vnsra_mask_wx_nxv8i8_nxv8i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv8i8_nxv8i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vnsra_mask_wx_nxv16i8_nxv16i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv16i8_nxv16i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vnsra_mask_wx_nxv32i8_nxv32i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv32i8_nxv32i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vnsra_mask_wx_nxv1i16_nxv1i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv1i16_nxv1i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i16( %0, @@ -908,7 +908,7 @@ define @intrinsic_vnsra_mask_wx_nxv2i16_nxv2i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv2i16_nxv2i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i16( %0, @@ -948,7 +948,7 @@ define @intrinsic_vnsra_mask_wx_nxv4i16_nxv4i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv4i16_nxv4i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i16( %0, @@ -988,7 +988,7 @@ define @intrinsic_vnsra_mask_wx_nxv8i16_nxv8i32_i16( %0, %1, i16 
%2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv8i16_nxv8i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i16( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vnsra_mask_wx_nxv16i16_nxv16i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv16i16_nxv16i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vnsra_mask_wx_nxv1i32_nxv1i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv1i32_nxv1i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i32( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vnsra_mask_wx_nxv2i32_nxv2i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv2i32_nxv2i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i32( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vnsra_mask_wx_nxv4i32_nxv4i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv4i32_nxv4i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i32( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vnsra_mask_wx_nxv8i32_nxv8i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv8i32_nxv8i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i32( %0, @@ -1216,7 +1216,7 @@ define @intrinsic_vnsra_mask_wi_nxv1i8_nxv1i16_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv1i8_nxv1i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i8( %0, @@ -1244,7 +1244,7 @@ define @intrinsic_vnsra_mask_wi_nxv2i8_nxv2i16_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv2i8_nxv2i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i8( %0, @@ -1272,7 +1272,7 @@ define @intrinsic_vnsra_mask_wi_nxv4i8_nxv4i16_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv4i8_nxv4i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i8( %0, @@ -1300,7 +1300,7 @@ define 
@intrinsic_vnsra_mask_wi_nxv8i8_nxv8i16_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv8i8_nxv8i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i8( %0, @@ -1328,7 +1328,7 @@ define @intrinsic_vnsra_mask_wi_nxv16i8_nxv16i16_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv16i8_nxv16i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i8( %0, @@ -1356,7 +1356,7 @@ define @intrinsic_vnsra_mask_wi_nxv32i8_nxv32i16_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv32i8_nxv32i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i8( %0, @@ -1384,7 +1384,7 @@ define @intrinsic_vnsra_mask_wi_nxv1i16_nxv1i32_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv1i16_nxv1i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i16( %0, @@ -1412,7 +1412,7 @@ define @intrinsic_vnsra_mask_wi_nxv2i16_nxv2i32_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv2i16_nxv2i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i16( %0, @@ -1440,7 +1440,7 @@ define @intrinsic_vnsra_mask_wi_nxv4i16_nxv4i32_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv4i16_nxv4i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i16( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vnsra_mask_wi_nxv8i16_nxv8i32_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv8i16_nxv8i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i16( %0, @@ -1496,7 +1496,7 @@ define @intrinsic_vnsra_mask_wi_nxv16i16_nxv16i32_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv16i16_nxv16i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i16( %0, @@ -1524,7 +1524,7 @@ define @intrinsic_vnsra_mask_wi_nxv1i32_nxv1i64_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv1i32_nxv1i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i32( %0, @@ -1552,7 +1552,7 @@ define @intrinsic_vnsra_mask_wi_nxv2i32_nxv2i64_i32( %0, %1, %2, i64 %3) nounwind { 
entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv2i32_nxv2i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i32( %0, @@ -1580,7 +1580,7 @@ define @intrinsic_vnsra_mask_wi_nxv4i32_nxv4i64_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv4i32_nxv4i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i32( %0, @@ -1608,7 +1608,7 @@ define @intrinsic_vnsra_mask_wi_nxv8i32_nxv8i64_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv8i32_nxv8i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vnsrl_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv1i8_nxv1i16_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vnsrl_mask_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv2i8_nxv2i16_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vnsrl_mask_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv4i8_nxv4i16_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vnsrl_mask_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv8i8_nxv8i16_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vnsrl_mask_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv16i8_nxv16i16_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vnsrl_mask_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv32i8_nxv32i16_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu 
; CHECK: vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vnsrl_mask_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv1i16_nxv1i32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vnsrl_mask_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv2i16_nxv2i32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vnsrl_mask_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv4i16_nxv4i32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vnsrl_mask_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv8i16_nxv8i32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vnsrl_mask_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv16i16_nxv16i32_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vnsrl_mask_wx_nxv1i8_nxv1i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv1i8_nxv1i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i8( %0, @@ -508,7 +508,7 @@ define @intrinsic_vnsrl_mask_wx_nxv2i8_nxv2i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv2i8_nxv2i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i8( %0, @@ -548,7 +548,7 @@ define @intrinsic_vnsrl_mask_wx_nxv4i8_nxv4i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv4i8_nxv4i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i8( %0, @@ -588,7 +588,7 @@ define @intrinsic_vnsrl_mask_wx_nxv8i8_nxv8i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv8i8_nxv8i16_i8 -; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i8( %0, @@ -628,7 +628,7 @@ define @intrinsic_vnsrl_mask_wx_nxv16i8_nxv16i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv16i8_nxv16i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i8( %0, @@ -668,7 +668,7 @@ define @intrinsic_vnsrl_mask_wx_nxv32i8_nxv32i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv32i8_nxv32i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i8( %0, @@ -708,7 +708,7 @@ define @intrinsic_vnsrl_mask_wx_nxv1i16_nxv1i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv1i16_nxv1i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i16( %0, @@ -748,7 +748,7 @@ define @intrinsic_vnsrl_mask_wx_nxv2i16_nxv2i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv2i16_nxv2i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i16( %0, @@ -788,7 +788,7 @@ define @intrinsic_vnsrl_mask_wx_nxv4i16_nxv4i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv4i16_nxv4i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i16( %0, @@ -828,7 +828,7 @@ define @intrinsic_vnsrl_mask_wx_nxv8i16_nxv8i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv8i16_nxv8i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i16( %0, @@ -868,7 +868,7 @@ define @intrinsic_vnsrl_mask_wx_nxv16i16_nxv16i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv16i16_nxv16i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i16( %0, @@ -896,7 +896,7 @@ define @intrinsic_vnsrl_mask_wi_nxv1i8_nxv1i16_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv1i8_nxv1i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i8( %0, @@ -924,7 +924,7 @@ define @intrinsic_vnsrl_mask_wi_nxv2i8_nxv2i16_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vnsrl_mask_wi_nxv2i8_nxv2i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i8( %0, @@ -952,7 +952,7 @@ define @intrinsic_vnsrl_mask_wi_nxv4i8_nxv4i16_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv4i8_nxv4i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i8( %0, @@ -980,7 +980,7 @@ define @intrinsic_vnsrl_mask_wi_nxv8i8_nxv8i16_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv8i8_nxv8i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i8( %0, @@ -1008,7 +1008,7 @@ define @intrinsic_vnsrl_mask_wi_nxv16i8_nxv16i16_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv16i8_nxv16i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i8( %0, @@ -1036,7 +1036,7 @@ define @intrinsic_vnsrl_mask_wi_nxv32i8_nxv32i16_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv32i8_nxv32i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i8( %0, @@ -1064,7 +1064,7 @@ define @intrinsic_vnsrl_mask_wi_nxv1i16_nxv1i32_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv1i16_nxv1i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i16( %0, @@ -1092,7 +1092,7 @@ define @intrinsic_vnsrl_mask_wi_nxv2i16_nxv2i32_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv2i16_nxv2i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i16( %0, @@ -1120,7 +1120,7 @@ define @intrinsic_vnsrl_mask_wi_nxv4i16_nxv4i32_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv4i16_nxv4i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vnsrl_mask_wi_nxv8i16_nxv8i32_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv8i16_nxv8i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i16( %0, @@ -1176,7 +1176,7 @@ define @intrinsic_vnsrl_mask_wi_nxv16i16_nxv16i32_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv16i16_nxv16i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i16( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vnsrl_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv1i8_nxv1i16_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vnsrl_mask_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv2i8_nxv2i16_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vnsrl_mask_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv4i8_nxv4i16_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vnsrl_mask_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv8i8_nxv8i16_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vnsrl_mask_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv16i8_nxv16i16_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vnsrl_mask_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv32i8_nxv32i16_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vnsrl_mask_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv1i16_nxv1i32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vnsrl_mask_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv2i16_nxv2i32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = 
call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vnsrl_mask_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv4i16_nxv4i32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vnsrl_mask_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv8i16_nxv8i32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vnsrl_mask_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv16i16_nxv16i32_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vnsrl_mask_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv1i32_nxv1i64_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32( %0, @@ -508,7 +508,7 @@ define @intrinsic_vnsrl_mask_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv2i32_nxv2i64_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32( %0, @@ -548,7 +548,7 @@ define @intrinsic_vnsrl_mask_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv4i32_nxv4i64_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vnsrl_mask_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv8i32_nxv8i64_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vnsrl_mask_wx_nxv1i8_nxv1i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv1i8_nxv1i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i8( %0, @@ -668,7 +668,7 @@ define @intrinsic_vnsrl_mask_wx_nxv2i8_nxv2i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv2i8_nxv2i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i8( %0, @@ -708,7 +708,7 @@ define @intrinsic_vnsrl_mask_wx_nxv4i8_nxv4i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv4i8_nxv4i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i8( %0, @@ -748,7 +748,7 @@ define @intrinsic_vnsrl_mask_wx_nxv8i8_nxv8i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv8i8_nxv8i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vnsrl_mask_wx_nxv16i8_nxv16i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv16i8_nxv16i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vnsrl_mask_wx_nxv32i8_nxv32i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv32i8_nxv32i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vnsrl_mask_wx_nxv1i16_nxv1i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv1i16_nxv1i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i16( %0, @@ -908,7 +908,7 @@ define @intrinsic_vnsrl_mask_wx_nxv2i16_nxv2i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv2i16_nxv2i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i16( %0, @@ -948,7 +948,7 @@ define @intrinsic_vnsrl_mask_wx_nxv4i16_nxv4i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv4i16_nxv4i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i16( %0, @@ -988,7 +988,7 @@ define @intrinsic_vnsrl_mask_wx_nxv8i16_nxv8i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv8i16_nxv8i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i16( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vnsrl_mask_wx_nxv16i16_nxv16i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vnsrl_mask_wx_nxv16i16_nxv16i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vnsrl_mask_wx_nxv1i32_nxv1i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv1i32_nxv1i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i32( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vnsrl_mask_wx_nxv2i32_nxv2i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv2i32_nxv2i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i32( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vnsrl_mask_wx_nxv4i32_nxv4i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv4i32_nxv4i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i32( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vnsrl_mask_wx_nxv8i32_nxv8i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv8i32_nxv8i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i32( %0, @@ -1216,7 +1216,7 @@ define @intrinsic_vnsrl_mask_wi_nxv1i8_nxv1i16_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv1i8_nxv1i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i8( %0, @@ -1244,7 +1244,7 @@ define @intrinsic_vnsrl_mask_wi_nxv2i8_nxv2i16_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv2i8_nxv2i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i8( %0, @@ -1272,7 +1272,7 @@ define @intrinsic_vnsrl_mask_wi_nxv4i8_nxv4i16_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv4i8_nxv4i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i8( %0, @@ -1300,7 +1300,7 @@ define @intrinsic_vnsrl_mask_wi_nxv8i8_nxv8i16_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv8i8_nxv8i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i8( %0, @@ -1328,7 +1328,7 @@ define @intrinsic_vnsrl_mask_wi_nxv16i8_nxv16i16_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vnsrl_mask_wi_nxv16i8_nxv16i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i8( %0, @@ -1356,7 +1356,7 @@ define @intrinsic_vnsrl_mask_wi_nxv32i8_nxv32i16_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv32i8_nxv32i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i8( %0, @@ -1384,7 +1384,7 @@ define @intrinsic_vnsrl_mask_wi_nxv1i16_nxv1i32_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv1i16_nxv1i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i16( %0, @@ -1412,7 +1412,7 @@ define @intrinsic_vnsrl_mask_wi_nxv2i16_nxv2i32_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv2i16_nxv2i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i16( %0, @@ -1440,7 +1440,7 @@ define @intrinsic_vnsrl_mask_wi_nxv4i16_nxv4i32_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv4i16_nxv4i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i16( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vnsrl_mask_wi_nxv8i16_nxv8i32_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv8i16_nxv8i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i16( %0, @@ -1496,7 +1496,7 @@ define @intrinsic_vnsrl_mask_wi_nxv16i16_nxv16i32_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv16i16_nxv16i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i16( %0, @@ -1524,7 +1524,7 @@ define @intrinsic_vnsrl_mask_wi_nxv1i32_nxv1i64_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv1i32_nxv1i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i32( %0, @@ -1552,7 +1552,7 @@ define @intrinsic_vnsrl_mask_wi_nxv2i32_nxv2i64_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv2i32_nxv2i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i32( %0, @@ -1580,7 +1580,7 @@ define @intrinsic_vnsrl_mask_wi_nxv4i32_nxv4i64_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv4i32_nxv4i64_i32 -; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i32( %0, @@ -1608,7 +1608,7 @@ define @intrinsic_vnsrl_mask_wi_nxv8i32_nxv8i64_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv8i32_nxv8i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vor_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vor_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vor_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vor_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vor_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vor_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vor_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, 
%1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vor_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vor_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vor_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vor_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vor_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vor_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vor_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define 
@intrinsic_vor_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vor_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv1i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vor_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv2i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vor_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv4i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vor_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv8i8.i8( %0, @@ -908,7 +908,7 @@ define @intrinsic_vor_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv16i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vor_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv32i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vor_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv64i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vor_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vor_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv1i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vor_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv2i16.i16( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vor_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv4i16.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vor_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv8i16.i16( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vor_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv16i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vor_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv32i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vor_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv1i32.i32( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vor_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv2i32.i32( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vor_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv4i32.i32( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vor_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i32_nxv8i32_i32 -; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv8i32.i32( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vor_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv16i32.i32( %0, @@ -1456,7 +1456,7 @@ define @intrinsic_vor_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv1i8.i8( %0, @@ -1484,7 +1484,7 @@ define @intrinsic_vor_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv2i8.i8( %0, @@ -1512,7 +1512,7 @@ define @intrinsic_vor_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv4i8.i8( %0, @@ -1540,7 +1540,7 @@ define @intrinsic_vor_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv8i8.i8( %0, @@ -1568,7 +1568,7 @@ define @intrinsic_vor_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv16i8.i8( %0, @@ -1596,7 +1596,7 @@ define @intrinsic_vor_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv32i8.i8( %0, @@ -1624,7 +1624,7 @@ define @intrinsic_vor_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv64i8.i8( %0, @@ -1652,7 +1652,7 @@ define @intrinsic_vor_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv1i16.i16( %0, @@ -1680,7 +1680,7 @@ define 
@intrinsic_vor_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv2i16.i16( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vor_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv4i16.i16( %0, @@ -1736,7 +1736,7 @@ define @intrinsic_vor_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv8i16.i16( %0, @@ -1764,7 +1764,7 @@ define @intrinsic_vor_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv16i16.i16( %0, @@ -1792,7 +1792,7 @@ define @intrinsic_vor_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv32i16.i16( %0, @@ -1820,7 +1820,7 @@ define @intrinsic_vor_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv1i32.i32( %0, @@ -1848,7 +1848,7 @@ define @intrinsic_vor_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv2i32.i32( %0, @@ -1876,7 +1876,7 @@ define @intrinsic_vor_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv4i32.i32( %0, @@ -1904,7 +1904,7 @@ define @intrinsic_vor_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv8i32.i32( %0, @@ -1932,7 +1932,7 @@ define @intrinsic_vor_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv16i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vor_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vor_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vor_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vor_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vor_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vor_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vor_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vor_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: 
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vor_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vor_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vor_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vor_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vor_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vor_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vor_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) 
nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vor_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64( %0, @@ -788,7 +788,7 @@ define @intrinsic_vor_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64( %0, @@ -828,7 +828,7 @@ define @intrinsic_vor_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64( %0, @@ -868,7 +868,7 @@ define @intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64( %0, @@ -908,7 +908,7 @@ define @intrinsic_vor_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv1i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vor_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv2i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vor_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv4i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vor_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv8i8.i8( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vor_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vor_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv16i8.i8( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vor_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv32i8.i8( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vor_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv64i8.i8( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vor_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv1i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vor_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv2i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vor_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv4i16.i16( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vor_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv8i16.i16( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vor_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv16i16.i16( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vor_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv32i16.i16( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vor_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv1i32.i32( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vor_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv2i32.i32( %0, @@ -1508,7 +1508,7 @@ define @intrinsic_vor_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv4i32.i32( %0, @@ -1548,7 +1548,7 @@ define @intrinsic_vor_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv8i32.i32( %0, @@ -1588,7 +1588,7 @@ define @intrinsic_vor_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv16i32.i32( %0, @@ -1628,7 +1628,7 @@ define @intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv1i64.i64( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv2i64.i64( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv4i64.i64( %0, @@ -1748,7 +1748,7 @@ define @intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vor.mask.nxv8i64.i64( %0, @@ -1776,7 +1776,7 @@ define @intrinsic_vor_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv1i8.i8( %0, @@ -1804,7 +1804,7 @@ define @intrinsic_vor_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv2i8.i8( %0, @@ -1832,7 +1832,7 @@ define @intrinsic_vor_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv4i8.i8( %0, @@ -1860,7 +1860,7 @@ define @intrinsic_vor_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv8i8.i8( %0, @@ -1888,7 +1888,7 @@ define @intrinsic_vor_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv16i8.i8( %0, @@ -1916,7 +1916,7 @@ define @intrinsic_vor_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv32i8.i8( %0, @@ -1944,7 +1944,7 @@ define @intrinsic_vor_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv64i8.i8( %0, @@ -1972,7 +1972,7 @@ define @intrinsic_vor_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv1i16.i16( %0, @@ -2000,7 +2000,7 @@ define @intrinsic_vor_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv2i16.i16( %0, @@ -2028,7 +2028,7 @@ define @intrinsic_vor_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv4i16.i16( %0, @@ -2056,7 +2056,7 @@ define @intrinsic_vor_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vor_mask_vi_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv8i16.i16( %0, @@ -2084,7 +2084,7 @@ define @intrinsic_vor_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv16i16.i16( %0, @@ -2112,7 +2112,7 @@ define @intrinsic_vor_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv32i16.i16( %0, @@ -2140,7 +2140,7 @@ define @intrinsic_vor_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv1i32.i32( %0, @@ -2168,7 +2168,7 @@ define @intrinsic_vor_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv2i32.i32( %0, @@ -2196,7 +2196,7 @@ define @intrinsic_vor_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv4i32.i32( %0, @@ -2224,7 +2224,7 @@ define @intrinsic_vor_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv8i32.i32( %0, @@ -2252,7 +2252,7 @@ define @intrinsic_vor_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv16i32.i32( %0, @@ -2280,7 +2280,7 @@ define @intrinsic_vor_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv1i64.i64( %0, @@ -2308,7 +2308,7 @@ define @intrinsic_vor_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call 
@llvm.riscv.vor.mask.nxv2i64.i64( %0, @@ -2336,7 +2336,7 @@ define @intrinsic_vor_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv4i64.i64( %0, @@ -2364,7 +2364,7 @@ define @intrinsic_vor_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vor.mask.nxv8i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv1i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv2i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv4i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv8i8.i8( %0, @@ -908,7 +908,7 @@ define @intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv16i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv32i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv64i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv1i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv2i16.i16( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv4i16.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv8i16.i16( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv16i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv32i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv1i32.i32( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv2i32.i32( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv4i32.i32( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv8i32.i32( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv16i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, 
i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define 
@intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vrem_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv1i64.nxv1i64( %0, @@ -788,7 +788,7 @@ define @intrinsic_vrem_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv2i64.nxv2i64( %0, @@ -828,7 +828,7 @@ define @intrinsic_vrem_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv4i64.nxv4i64( %0, @@ -868,7 +868,7 @@ define @intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv8i64.nxv8i64( %0, @@ -908,7 +908,7 @@ define @intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv1i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv2i8.i8( %0, @@ -988,7 +988,7 @@ 
define @intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv4i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv8i8.i8( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv16i8.i8( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv32i8.i8( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv64i8.i8( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv1i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv2i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv4i16.i16( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv8i16.i16( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16( %0, 
%1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv16i16.i16( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv32i16.i16( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv1i32.i32( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv2i32.i32( %0, @@ -1508,7 +1508,7 @@ define @intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv4i32.i32( %0, @@ -1548,7 +1548,7 @@ define @intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv8i32.i32( %0, @@ -1588,7 +1588,7 @@ define @intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv16i32.i32( %0, @@ -1628,7 +1628,7 @@ define @intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv1i64.i64( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv2i64.i64( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, 
%3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv4i64.i64( %0, @@ -1748,7 +1748,7 @@ define @intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrem.mask.nxv8i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: 
vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv1i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv2i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv4i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv8i8.i8( %0, @@ -908,7 +908,7 @@ define @intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv16i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv32i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv64i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv1i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv2i16.i16( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv4i16.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv8i16.i16( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv16i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv32i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv1i32.i32( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv2i32.i32( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv4i32.i32( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv8i32.i32( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv16i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv32i8.nxv32i8( %0, @@ 
-268,7 +268,7 @@ define @intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, 
{{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vremu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv1i64.nxv1i64( %0, @@ -788,7 +788,7 @@ define @intrinsic_vremu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv2i64.nxv2i64( %0, @@ -828,7 +828,7 @@ define @intrinsic_vremu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv4i64.nxv4i64( %0, @@ -868,7 +868,7 @@ define @intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv8i64.nxv8i64( %0, @@ -908,7 +908,7 @@ define @intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv1i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e8,mf4,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv2i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv4i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv8i8.i8( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv16i8.i8( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv32i8.i8( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv64i8.i8( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv1i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv2i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv4i16.i16( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: 
vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv8i16.i16( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv16i16.i16( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv32i16.i16( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv1i32.i32( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv2i32.i32( %0, @@ -1508,7 +1508,7 @@ define @intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv4i32.i32( %0, @@ -1548,7 +1548,7 @@ define @intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv8i32.i32( %0, @@ -1588,7 +1588,7 @@ define @intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv16i32.i32( %0, @@ -1628,7 +1628,7 @@ define @intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv1i64.i64( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv2i64.i64( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv4i64.i64( %0, @@ -1748,7 +1748,7 @@ define @intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vremu.mask.nxv8i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll @@ -32,7 +32,7 @@ define @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu ; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -77,7 +77,7 @@ define @intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu ; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -122,7 +122,7 @@ define @intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu ; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -167,7 +167,7 @@ define @intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu ; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -212,7 +212,7 @@ define @intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu ; CHECK-NEXT: vrgather.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -259,7 +259,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e8,m4,ta,mu ; CHECK-NEXT: vle8.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e8,m4,tu,mu ; CHECK-NEXT: vrgather.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -309,7 +309,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu ; CHECK-NEXT: vle8.v v8, (a1) ; CHECK-NEXT: vle8.v v24, (a0) -; CHECK-NEXT: vsetvli a0, a2, e8,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e8,m8,tu,mu ; CHECK-NEXT: vrgather.vv v16, v24, v8, 
v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -354,7 +354,7 @@ define @intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -399,7 +399,7 @@ define @intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -444,7 +444,7 @@ define @intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -489,7 +489,7 @@ define @intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vrgather.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -536,7 +536,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vrgather.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -586,7 +586,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu ; CHECK-NEXT: vle16.v v8, (a1) ; CHECK-NEXT: vle16.v v24, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m8,tu,mu ; CHECK-NEXT: vrgather.vv v16, v24, v8, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -631,7 +631,7 @@ define @intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -676,7 +676,7 @@ define @intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -721,7 +721,7 @@ define @intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vrgather.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -768,7 +768,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vrgather.vv v16, 
v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -818,7 +818,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: vle32.v v24, (a0) -; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e32,m8,tu,mu ; CHECK-NEXT: vrgather.vv v16, v24, v8, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -863,7 +863,7 @@ define @intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -908,7 +908,7 @@ define @intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -953,7 +953,7 @@ define @intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -998,7 +998,7 @@ define @intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vrgather.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1045,7 +1045,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vrgather.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1095,7 +1095,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu ; CHECK-NEXT: vle16.v v8, (a1) ; CHECK-NEXT: vle16.v v24, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m8,tu,mu ; CHECK-NEXT: vrgather.vv v16, v24, v8, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1140,7 +1140,7 @@ define @intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1185,7 +1185,7 @@ define @intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1230,7 +1230,7 @@ define @intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; 
CHECK-NEXT: vrgather.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1277,7 +1277,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vrgather.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1327,7 +1327,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: vle32.v v24, (a0) -; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e32,m8,tu,mu ; CHECK-NEXT: vrgather.vv v16, v24, v8, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1372,7 +1372,7 @@ define @intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1417,7 +1417,7 @@ define @intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1462,7 +1462,7 @@ define @intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1507,7 +1507,7 @@ define @intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1552,7 +1552,7 @@ define @intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1597,7 +1597,7 @@ define @intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu ; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1644,7 +1644,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e8,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e8,m8,tu,mu ; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1689,7 +1689,7 @@ define @intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t ; 
CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1734,7 +1734,7 @@ define @intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1779,7 +1779,7 @@ define @intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1824,7 +1824,7 @@ define @intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1869,7 +1869,7 @@ define @intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu ; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1916,7 +1916,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m8,tu,mu ; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1961,7 +1961,7 @@ define @intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu ; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2006,7 +2006,7 @@ define @intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu ; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2051,7 +2051,7 @@ define @intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu ; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2096,7 +2096,7 @@ define @intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu ; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2143,7 +2143,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu +; CHECK-NEXT: vsetvli 
a0, a2, e32,m8,tu,mu ; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2188,7 +2188,7 @@ define @intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2233,7 +2233,7 @@ define @intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2278,7 +2278,7 @@ define @intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2323,7 +2323,7 @@ define @intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2368,7 +2368,7 @@ define @intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu ; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2415,7 +2415,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m8,tu,mu ; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2460,7 +2460,7 @@ define @intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu ; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2505,7 +2505,7 @@ define @intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu ; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2550,7 +2550,7 @@ define @intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu ; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2595,7 +2595,7 @@ define @intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu ; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2642,7 +2642,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e32,m8,tu,mu ; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2675,7 +2675,7 @@ define @intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu ; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2708,7 +2708,7 @@ define @intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu ; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2741,7 +2741,7 @@ define @intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu ; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2774,7 +2774,7 @@ define @intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu ; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2807,7 +2807,7 @@ define @intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu ; CHECK-NEXT: vrgather.vi v16, v18, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2840,7 +2840,7 @@ define @intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu ; CHECK-NEXT: vrgather.vi v16, v20, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2875,7 +2875,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a1, e8,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vrgather.vi v16, v8, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2908,7 +2908,7 @@ define @intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2941,7 +2941,7 @@ define @intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2974,7 +2974,7 @@ define @intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3007,7 +3007,7 @@ define @intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vrgather.vi v16, v18, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3040,7 +3040,7 @@ define @intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu ; CHECK-NEXT: vrgather.vi v16, v20, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3075,7 +3075,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vrgather.vi v16, v8, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3108,7 +3108,7 @@ define @intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3141,7 +3141,7 @@ define @intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3174,7 +3174,7 @@ define @intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vrgather.vi v16, v18, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3207,7 +3207,7 @@ define @intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu ; CHECK-NEXT: vrgather.vi v16, v20, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3242,7 +3242,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vrgather.vi v16, v8, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3275,7 +3275,7 @@ define @intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3308,7 +3308,7 @@ define @intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3341,7 +3341,7 @@ define @intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3374,7 +3374,7 @@ define @intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vrgather.vi v16, v18, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3407,7 +3407,7 @@ define @intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu ; CHECK-NEXT: vrgather.vi v16, v20, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3442,7 +3442,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vrgather.vi v16, v8, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3475,7 +3475,7 @@ define @intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3508,7 +3508,7 @@ define @intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3541,7 +3541,7 @@ define @intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vrgather.vi v16, v18, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3574,7 +3574,7 @@ define @intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu ; CHECK-NEXT: vrgather.vi v16, v20, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3609,7 +3609,7 @@ ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vrgather.vi v16, v8, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll @@ -32,7 +32,7 @@ define @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu ; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -77,7 +77,7 @@ define @intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu ; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -122,7 +122,7 @@ define @intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu ; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -167,7 +167,7 @@ define @intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu ; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -212,7 +212,7 @@ define @intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu ; CHECK-NEXT: vrgather.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -259,7 +259,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e8,m4,ta,mu ; CHECK-NEXT: vle8.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e8,m4,tu,mu ; CHECK-NEXT: vrgather.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -309,7 +309,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu ; CHECK-NEXT: vle8.v v8, (a1) ; CHECK-NEXT: vle8.v v24, (a0) -; CHECK-NEXT: vsetvli a0, a2, e8,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e8,m8,tu,mu ; CHECK-NEXT: vrgather.vv v16, v24, v8, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -354,7 +354,7 @@ define @intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -399,7 +399,7 @@ define @intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; 
CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -444,7 +444,7 @@ define @intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -489,7 +489,7 @@ define @intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vrgather.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -536,7 +536,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vrgather.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -586,7 +586,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu ; CHECK-NEXT: vle16.v v8, (a1) ; CHECK-NEXT: vle16.v v24, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m8,tu,mu ; CHECK-NEXT: vrgather.vv v16, v24, v8, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -631,7 +631,7 @@ define @intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -676,7 +676,7 @@ define @intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -721,7 +721,7 @@ define @intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vrgather.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -768,7 +768,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vrgather.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -818,7 +818,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: vle32.v v24, (a0) -; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e32,m8,tu,mu ; CHECK-NEXT: vrgather.vv v16, v24, v8, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -863,7 +863,7 @@ define @intrinsic_vrgather_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu ; CHECK-NEXT: vrgather.vv v16, 
v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -908,7 +908,7 @@ define @intrinsic_vrgather_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu ; CHECK-NEXT: vrgather.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -955,7 +955,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu ; CHECK-NEXT: vle64.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vrgather.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1005,7 +1005,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu ; CHECK-NEXT: vle64.v v8, (a1) ; CHECK-NEXT: vle64.v v24, (a0) -; CHECK-NEXT: vsetvli a0, a2, e64,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e64,m8,tu,mu ; CHECK-NEXT: vrgather.vv v16, v24, v8, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1050,7 +1050,7 @@ define @intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1140,7 +1140,7 @@ define @intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1185,7 +1185,7 @@ define @intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vrgather.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1232,7 +1232,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vrgather.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1282,7 +1282,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu ; CHECK-NEXT: vle16.v v8, (a1) ; CHECK-NEXT: vle16.v v24, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m8,tu,mu ; CHECK-NEXT: vrgather.vv v16, v24, v8, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1327,7 +1327,7 @@ define @intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ 
-1372,7 +1372,7 @@ define @intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1417,7 +1417,7 @@ define @intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vrgather.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1464,7 +1464,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vrgather.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1514,7 +1514,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: vle32.v v24, (a0) -; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e32,m8,tu,mu ; CHECK-NEXT: vrgather.vv v16, v24, v8, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1559,7 +1559,7 @@ define @intrinsic_vrgather_mask_vv_nxv1f64_nxv1f64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu ; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1604,7 +1604,7 @@ define @intrinsic_vrgather_mask_vv_nxv2f64_nxv2f64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu ; CHECK-NEXT: vrgather.vv v16, v18, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1651,7 +1651,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu ; CHECK-NEXT: vle64.v v28, (a0) -; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu ; CHECK-NEXT: vrgather.vv v16, v20, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1701,7 +1701,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu ; CHECK-NEXT: vle64.v v8, (a1) ; CHECK-NEXT: vle64.v v24, (a0) -; CHECK-NEXT: vsetvli a0, a2, e64,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e64,m8,tu,mu ; CHECK-NEXT: vrgather.vv v16, v24, v8, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1746,7 +1746,7 @@ define @intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1791,7 +1791,7 @@ define @intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1836,7 +1836,7 @@ define @intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 
%2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1881,7 +1881,7 @@ define @intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1926,7 +1926,7 @@ define @intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1971,7 +1971,7 @@ define @intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu ; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2018,7 +2018,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e8,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e8,m8,tu,mu ; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2063,7 +2063,7 @@ define @intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2108,7 +2108,7 @@ define @intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2153,7 +2153,7 @@ define @intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2198,7 +2198,7 @@ define @intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2243,7 +2243,7 @@ define @intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu ; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t ; 
CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2290,7 +2290,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m8,tu,mu ; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2335,7 +2335,7 @@ define @intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu ; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2380,7 +2380,7 @@ define @intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu ; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2425,7 +2425,7 @@ define @intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu ; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2470,7 +2470,7 @@ define @intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu ; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2517,7 +2517,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e32,m8,tu,mu ; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2562,7 +2562,7 @@ define @intrinsic_vrgather_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu ; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2607,7 +2607,7 @@ define @intrinsic_vrgather_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu ; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2652,7 +2652,7 @@ define @intrinsic_vrgather_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu ; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2699,7 +2699,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu ; CHECK-NEXT: vle64.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e64,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e64,m8,tu,mu ; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t ; CHECK-NEXT: jalr 
zero, 0(ra) entry: @@ -2744,7 +2744,7 @@ define @intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2789,7 +2789,7 @@ define @intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2834,7 +2834,7 @@ define @intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2879,7 +2879,7 @@ define @intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2924,7 +2924,7 @@ define @intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu ; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -2971,7 +2971,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m8,tu,mu ; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3016,7 +3016,7 @@ define @intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu ; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3061,7 +3061,7 @@ define @intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu ; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3106,7 +3106,7 @@ define @intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu ; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3151,7 +3151,7 @@ define @intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32_i32: ; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu ; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3198,7 +3198,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e32,m8,tu,mu ; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3243,7 +3243,7 @@ define @intrinsic_vrgather_mask_vx_nxv1f64_nxv1f64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f64_nxv1f64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu ; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3288,7 +3288,7 @@ define @intrinsic_vrgather_mask_vx_nxv2f64_nxv2f64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f64_nxv2f64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu ; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3333,7 +3333,7 @@ define @intrinsic_vrgather_mask_vx_nxv4f64_nxv4f64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f64_nxv4f64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu ; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3380,7 +3380,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu ; CHECK-NEXT: vle64.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e64,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e64,m8,tu,mu ; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3413,7 +3413,7 @@ define @intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu ; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3446,7 +3446,7 @@ define @intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu ; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3479,7 +3479,7 @@ define @intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu ; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3512,7 +3512,7 @@ define @intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu ; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3545,7 +3545,7 @@ define @intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu ; CHECK-NEXT: vrgather.vi v16, v18, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3578,7 +3578,7 @@ define @intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu ; CHECK-NEXT: vrgather.vi v16, v20, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3613,7 +3613,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a1, e8,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vrgather.vi v16, v8, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3646,7 +3646,7 @@ define @intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3679,7 +3679,7 @@ define @intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3712,7 +3712,7 @@ define @intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3745,7 +3745,7 @@ define @intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vrgather.vi v16, v18, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3778,7 +3778,7 @@ define @intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu ; CHECK-NEXT: vrgather.vi v16, v20, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3813,7 +3813,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vrgather.vi v16, v8, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3846,7 +3846,7 @@ define @intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3879,7 +3879,7 @@ define @intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3912,7 +3912,7 @@ define @intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vrgather.vi v16, v18, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3945,7 +3945,7 @@ define @intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu ; CHECK-NEXT: vrgather.vi v16, v20, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -3980,7 +3980,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vrgather.vi v16, v8, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -4013,7 +4013,7 @@ define @intrinsic_vrgather_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu ; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -4046,7 +4046,7 @@ define @intrinsic_vrgather_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu ; CHECK-NEXT: vrgather.vi v16, v18, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -4079,7 +4079,7 @@ define @intrinsic_vrgather_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu ; CHECK-NEXT: vrgather.vi v16, v20, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -4114,7 +4114,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu ; CHECK-NEXT: vle64.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a1, e64,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vrgather.vi v16, v8, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -4147,7 +4147,7 @@ define @intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -4180,7 +4180,7 @@ define @intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -4213,7 +4213,7 @@ define @intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -4246,7 +4246,7 @@ define @intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vrgather.vi v16, v18, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -4279,7 +4279,7 @@ define @intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu ; CHECK-NEXT: vrgather.vi v16, v20, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -4314,7 +4314,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a1, e16,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vrgather.vi v16, v8, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -4347,7 +4347,7 @@ define @intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -4380,7 +4380,7 @@ define @intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -4413,7 +4413,7 @@ define @intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vrgather.vi v16, v18, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -4446,7 +4446,7 @@ define @intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu ; CHECK-NEXT: vrgather.vi v16, v20, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -4481,7 +4481,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a1, e32,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vrgather.vi v16, v8, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -4514,7 +4514,7 @@ define @intrinsic_vrgather_mask_vi_nxv1f64_nxv1f64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f64_nxv1f64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu ; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -4547,7 +4547,7 @@ define @intrinsic_vrgather_mask_vi_nxv2f64_nxv2f64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f64_nxv2f64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, 
e64,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu ; CHECK-NEXT: vrgather.vi v16, v18, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -4580,7 +4580,7 @@ define @intrinsic_vrgather_mask_vi_nxv4f64_nxv4f64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f64_nxv4f64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu ; CHECK-NEXT: vrgather.vi v16, v20, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -4615,7 +4615,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu ; CHECK-NEXT: vle64.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a1, e64,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vrgather.vi v16, v8, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv1i8.i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vrsub_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv2i8.i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vrsub_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv4i8.i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vrsub_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv8i8.i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vrsub_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv16i8.i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vrsub_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv32i8.i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vrsub_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv64i8.i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vrsub_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv1i16.i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vrsub_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv2i16.i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vrsub_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv4i16.i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vrsub_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv8i16.i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vrsub_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv16i16.i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vrsub_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv32i16.i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vrsub_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv1i32.i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vrsub_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv2i32.i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vrsub_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv4i32.i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vrsub_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv8i32.i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vrsub_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv16i32.i32( %0, @@ -736,7 +736,7 @@ define @intrinsic_vrsub_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv1i8.i8( %0, @@ -764,7 +764,7 @@ define @intrinsic_vrsub_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv2i8.i8( %0, @@ -792,7 +792,7 @@ define @intrinsic_vrsub_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv4i8.i8( %0, @@ -820,7 +820,7 @@ define @intrinsic_vrsub_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv8i8.i8( %0, @@ -848,7 +848,7 @@ define @intrinsic_vrsub_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv16i8.i8( %0, @@ -876,7 +876,7 @@ define @intrinsic_vrsub_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv32i8.i8( %0, @@ -904,7 +904,7 @@ define @intrinsic_vrsub_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv64i8.i8( %0, @@ -932,7 +932,7 
@@ define @intrinsic_vrsub_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv1i16.i16( %0, @@ -960,7 +960,7 @@ define @intrinsic_vrsub_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv2i16.i16( %0, @@ -988,7 +988,7 @@ define @intrinsic_vrsub_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv4i16.i16( %0, @@ -1016,7 +1016,7 @@ define @intrinsic_vrsub_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv8i16.i16( %0, @@ -1044,7 +1044,7 @@ define @intrinsic_vrsub_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv16i16.i16( %0, @@ -1072,7 +1072,7 @@ define @intrinsic_vrsub_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv32i16.i16( %0, @@ -1100,7 +1100,7 @@ define @intrinsic_vrsub_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv1i32.i32( %0, @@ -1128,7 +1128,7 @@ define @intrinsic_vrsub_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv2i32.i32( %0, @@ -1156,7 +1156,7 @@ define @intrinsic_vrsub_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv4i32.i32( %0, @@ -1184,7 +1184,7 @@ define @intrinsic_vrsub_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vrsub_mask_vi_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv8i32.i32( %0, @@ -1212,7 +1212,7 @@ define @intrinsic_vrsub_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv16i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv1i8.i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vrsub_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv2i8.i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vrsub_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv4i8.i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vrsub_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv8i8.i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vrsub_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv16i8.i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vrsub_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv32i8.i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vrsub_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv64i8.i8( %0, @@ -308,7 +308,7 @@ define 
@intrinsic_vrsub_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv1i16.i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vrsub_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv2i16.i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vrsub_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv4i16.i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vrsub_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv8i16.i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vrsub_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv16i16.i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vrsub_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv32i16.i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vrsub_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv1i32.i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vrsub_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv2i32.i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vrsub_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv4i32.i32( %0, @@ -668,7 +668,7 @@ 
define @intrinsic_vrsub_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv8i32.i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vrsub_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv16i32.i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vrsub_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv1i64.i64( %0, @@ -788,7 +788,7 @@ define @intrinsic_vrsub_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv2i64.i64( %0, @@ -828,7 +828,7 @@ define @intrinsic_vrsub_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv4i64.i64( %0, @@ -868,7 +868,7 @@ define @intrinsic_vrsub_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vrsub.mask.nxv8i64.i64( %0, @@ -896,7 +896,7 @@ define @intrinsic_vrsub_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv1i8.i8( %0, @@ -924,7 +924,7 @@ define @intrinsic_vrsub_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv2i8.i8( %0, @@ -952,7 +952,7 @@ define @intrinsic_vrsub_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv4i8.i8( %0, @@ -980,7 +980,7 @@ define @intrinsic_vrsub_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { 
entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv8i8.i8( %0, @@ -1008,7 +1008,7 @@ define @intrinsic_vrsub_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv16i8.i8( %0, @@ -1036,7 +1036,7 @@ define @intrinsic_vrsub_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv32i8.i8( %0, @@ -1064,7 +1064,7 @@ define @intrinsic_vrsub_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv64i8.i8( %0, @@ -1092,7 +1092,7 @@ define @intrinsic_vrsub_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv1i16.i16( %0, @@ -1120,7 +1120,7 @@ define @intrinsic_vrsub_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv2i16.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vrsub_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv4i16.i16( %0, @@ -1176,7 +1176,7 @@ define @intrinsic_vrsub_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv8i16.i16( %0, @@ -1204,7 +1204,7 @@ define @intrinsic_vrsub_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv16i16.i16( %0, @@ -1232,7 +1232,7 @@ define @intrinsic_vrsub_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e16,m8,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv32i16.i16( %0, @@ -1260,7 +1260,7 @@ define @intrinsic_vrsub_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv1i32.i32( %0, @@ -1288,7 +1288,7 @@ define @intrinsic_vrsub_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv2i32.i32( %0, @@ -1316,7 +1316,7 @@ define @intrinsic_vrsub_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv4i32.i32( %0, @@ -1344,7 +1344,7 @@ define @intrinsic_vrsub_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv8i32.i32( %0, @@ -1372,7 +1372,7 @@ define @intrinsic_vrsub_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv16i32.i32( %0, @@ -1400,7 +1400,7 @@ define @intrinsic_vrsub_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv1i64.i64( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vrsub_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv2i64.i64( %0, @@ -1456,7 +1456,7 @@ define @intrinsic_vrsub_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv4i64.i64( %0, @@ -1484,7 +1484,7 @@ define @intrinsic_vrsub_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv8i64.i64( %0, diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vsadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vsadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vsadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vsadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vsadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vsadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vsadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vsadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vsadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vsadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vsadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vsadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vsadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vsadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) 
nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vsadd_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv1i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vsadd_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv2i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vsadd_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv4i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vsadd_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv8i8.i8( %0, @@ -908,7 +908,7 @@ define @intrinsic_vsadd_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv16i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vsadd_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv32i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vsadd_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv64i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vsadd_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv1i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vsadd_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vsadd_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv2i16.i16( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vsadd_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv4i16.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vsadd_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv8i16.i16( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vsadd_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv16i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vsadd_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv32i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vsadd_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv1i32.i32( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vsadd_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv2i32.i32( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vsadd_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv4i32.i32( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vsadd_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv8i32.i32( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vsadd_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) 
nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv16i32.i32( %0, @@ -1456,7 +1456,7 @@ define @intrinsic_vsadd_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv1i8.i8( %0, @@ -1484,7 +1484,7 @@ define @intrinsic_vsadd_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv2i8.i8( %0, @@ -1512,7 +1512,7 @@ define @intrinsic_vsadd_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv4i8.i8( %0, @@ -1540,7 +1540,7 @@ define @intrinsic_vsadd_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv8i8.i8( %0, @@ -1568,7 +1568,7 @@ define @intrinsic_vsadd_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv16i8.i8( %0, @@ -1596,7 +1596,7 @@ define @intrinsic_vsadd_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv32i8.i8( %0, @@ -1624,7 +1624,7 @@ define @intrinsic_vsadd_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv64i8.i8( %0, @@ -1652,7 +1652,7 @@ define @intrinsic_vsadd_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv1i16.i16( %0, @@ -1680,7 +1680,7 @@ define @intrinsic_vsadd_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; 
CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv2i16.i16( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vsadd_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv4i16.i16( %0, @@ -1736,7 +1736,7 @@ define @intrinsic_vsadd_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv8i16.i16( %0, @@ -1764,7 +1764,7 @@ define @intrinsic_vsadd_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv16i16.i16( %0, @@ -1792,7 +1792,7 @@ define @intrinsic_vsadd_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv32i16.i16( %0, @@ -1820,7 +1820,7 @@ define @intrinsic_vsadd_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv1i32.i32( %0, @@ -1848,7 +1848,7 @@ define @intrinsic_vsadd_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv2i32.i32( %0, @@ -1876,7 +1876,7 @@ define @intrinsic_vsadd_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv4i32.i32( %0, @@ -1904,7 +1904,7 @@ define @intrinsic_vsadd_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv8i32.i32( %0, @@ -1932,7 +1932,7 @@ define @intrinsic_vsadd_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv16i32.i32( %0, diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vsadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vsadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vsadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vsadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vsadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vsadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vsadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vsadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vsadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vsadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vsadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vsadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vsadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vsadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) 
nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vsadd_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64( %0, @@ -788,7 +788,7 @@ define @intrinsic_vsadd_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64( %0, @@ -828,7 +828,7 @@ define @intrinsic_vsadd_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64( %0, @@ -868,7 +868,7 @@ define @intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64( %0, @@ -908,7 +908,7 @@ define @intrinsic_vsadd_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv1i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vsadd_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv2i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vsadd_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv4i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vsadd_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv8i8.i8( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vsadd_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) 
nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv16i8.i8( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vsadd_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv32i8.i8( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vsadd_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv64i8.i8( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vsadd_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv1i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vsadd_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv2i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vsadd_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv4i16.i16( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vsadd_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv8i16.i16( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vsadd_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv16i16.i16( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vsadd_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv32i16.i16( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vsadd_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, 
i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv1i32.i32( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vsadd_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv2i32.i32( %0, @@ -1508,7 +1508,7 @@ define @intrinsic_vsadd_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv4i32.i32( %0, @@ -1548,7 +1548,7 @@ define @intrinsic_vsadd_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv8i32.i32( %0, @@ -1588,7 +1588,7 @@ define @intrinsic_vsadd_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv16i32.i32( %0, @@ -1628,7 +1628,7 @@ define @intrinsic_vsadd_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv1i64.i64( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vsadd_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv2i64.i64( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vsadd_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv4i64.i64( %0, @@ -1748,7 +1748,7 @@ define @intrinsic_vsadd_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsadd.mask.nxv8i64.i64( %0, @@ -1776,7 +1776,7 @@ define @intrinsic_vsadd_mask_vi_nxv1i8_nxv1i8_i8( %0, 
%1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv1i8.i8( %0, @@ -1804,7 +1804,7 @@ define @intrinsic_vsadd_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv2i8.i8( %0, @@ -1832,7 +1832,7 @@ define @intrinsic_vsadd_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv4i8.i8( %0, @@ -1860,7 +1860,7 @@ define @intrinsic_vsadd_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv8i8.i8( %0, @@ -1888,7 +1888,7 @@ define @intrinsic_vsadd_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv16i8.i8( %0, @@ -1916,7 +1916,7 @@ define @intrinsic_vsadd_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv32i8.i8( %0, @@ -1944,7 +1944,7 @@ define @intrinsic_vsadd_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv64i8.i8( %0, @@ -1972,7 +1972,7 @@ define @intrinsic_vsadd_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv1i16.i16( %0, @@ -2000,7 +2000,7 @@ define @intrinsic_vsadd_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv2i16.i16( %0, @@ -2028,7 +2028,7 @@ define @intrinsic_vsadd_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e16,m1,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv4i16.i16( %0, @@ -2056,7 +2056,7 @@ define @intrinsic_vsadd_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv8i16.i16( %0, @@ -2084,7 +2084,7 @@ define @intrinsic_vsadd_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv16i16.i16( %0, @@ -2112,7 +2112,7 @@ define @intrinsic_vsadd_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv32i16.i16( %0, @@ -2140,7 +2140,7 @@ define @intrinsic_vsadd_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv1i32.i32( %0, @@ -2168,7 +2168,7 @@ define @intrinsic_vsadd_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv2i32.i32( %0, @@ -2196,7 +2196,7 @@ define @intrinsic_vsadd_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv4i32.i32( %0, @@ -2224,7 +2224,7 @@ define @intrinsic_vsadd_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv8i32.i32( %0, @@ -2252,7 +2252,7 @@ define @intrinsic_vsadd_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv16i32.i32( %0, @@ -2280,7 +2280,7 @@ define @intrinsic_vsadd_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv1i64.i64( %0, @@ -2308,7 +2308,7 
@@ define @intrinsic_vsadd_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv2i64.i64( %0, @@ -2336,7 +2336,7 @@ define @intrinsic_vsadd_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv4i64.i64( %0, @@ -2364,7 +2364,7 @@ define @intrinsic_vsadd_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsadd.mask.nxv8i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vsaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vsaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vsaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vsaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vsaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vsaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vsaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vsaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vsaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vsaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vsaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vsaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vsaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; 
CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vsaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vsaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vsaddu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv1i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vsaddu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv2i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vsaddu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv4i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vsaddu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv8i8.i8( %0, @@ -908,7 +908,7 @@ define @intrinsic_vsaddu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv16i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vsaddu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 
%4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv32i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vsaddu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv64i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vsaddu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv1i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vsaddu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv2i16.i16( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vsaddu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv4i16.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vsaddu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv8i16.i16( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vsaddu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv16i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vsaddu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv32i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vsaddu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv1i32.i32( %0, @@ -1308,7 +1308,7 @@ define 
@intrinsic_vsaddu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv2i32.i32( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vsaddu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv4i32.i32( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vsaddu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv8i32.i32( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vsaddu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv16i32.i32( %0, @@ -1456,7 +1456,7 @@ define @intrinsic_vsaddu_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv1i8.i8( %0, @@ -1484,7 +1484,7 @@ define @intrinsic_vsaddu_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv2i8.i8( %0, @@ -1512,7 +1512,7 @@ define @intrinsic_vsaddu_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv4i8.i8( %0, @@ -1540,7 +1540,7 @@ define @intrinsic_vsaddu_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv8i8.i8( %0, @@ -1568,7 +1568,7 @@ define @intrinsic_vsaddu_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv16i8.i8( %0, @@ -1596,7 +1596,7 @@ define @intrinsic_vsaddu_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { 
entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv32i8.i8( %0, @@ -1624,7 +1624,7 @@ define @intrinsic_vsaddu_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv64i8.i8( %0, @@ -1652,7 +1652,7 @@ define @intrinsic_vsaddu_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv1i16.i16( %0, @@ -1680,7 +1680,7 @@ define @intrinsic_vsaddu_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv2i16.i16( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vsaddu_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv4i16.i16( %0, @@ -1736,7 +1736,7 @@ define @intrinsic_vsaddu_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv8i16.i16( %0, @@ -1764,7 +1764,7 @@ define @intrinsic_vsaddu_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv16i16.i16( %0, @@ -1792,7 +1792,7 @@ define @intrinsic_vsaddu_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv32i16.i16( %0, @@ -1820,7 +1820,7 @@ define @intrinsic_vsaddu_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv1i32.i32( %0, @@ -1848,7 +1848,7 @@ define @intrinsic_vsaddu_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv2i32.i32( %0, @@ -1876,7 +1876,7 @@ define @intrinsic_vsaddu_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv4i32.i32( %0, @@ -1904,7 +1904,7 @@ define @intrinsic_vsaddu_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv8i32.i32( %0, @@ -1932,7 +1932,7 @@ define @intrinsic_vsaddu_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv16i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vsaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vsaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vsaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vsaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vsaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define 
@intrinsic_vsaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vsaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vsaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vsaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vsaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vsaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vsaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsaddu.vv 
{{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vsaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vsaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vsaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vsaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64( %0, @@ -788,7 +788,7 @@ define @intrinsic_vsaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64( %0, @@ -828,7 +828,7 @@ define @intrinsic_vsaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64( %0, @@ -868,7 +868,7 @@ define @intrinsic_vsaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64( %0, @@ -908,7 +908,7 @@ define @intrinsic_vsaddu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv1i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vsaddu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv2i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vsaddu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv4i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vsaddu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv8i8.i8( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vsaddu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv16i8.i8( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vsaddu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv32i8.i8( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vsaddu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv64i8.i8( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vsaddu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv1i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vsaddu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv2i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vsaddu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv4i16.i16( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vsaddu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv8i16.i16( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vsaddu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv16i16.i16( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vsaddu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv32i16.i16( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vsaddu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv1i32.i32( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vsaddu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv2i32.i32( %0, @@ -1508,7 +1508,7 @@ define @intrinsic_vsaddu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv4i32.i32( %0, @@ -1548,7 +1548,7 @@ define @intrinsic_vsaddu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv8i32.i32( %0, @@ -1588,7 +1588,7 @@ define @intrinsic_vsaddu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv16i32.i32( %0, @@ -1628,7 +1628,7 @@ define @intrinsic_vsaddu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vsaddu_mask_vx_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv1i64.i64( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vsaddu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv2i64.i64( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vsaddu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv4i64.i64( %0, @@ -1748,7 +1748,7 @@ define @intrinsic_vsaddu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv8i64.i64( %0, @@ -1776,7 +1776,7 @@ define @intrinsic_vsaddu_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv1i8.i8( %0, @@ -1804,7 +1804,7 @@ define @intrinsic_vsaddu_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv2i8.i8( %0, @@ -1832,7 +1832,7 @@ define @intrinsic_vsaddu_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv4i8.i8( %0, @@ -1860,7 +1860,7 @@ define @intrinsic_vsaddu_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv8i8.i8( %0, @@ -1888,7 +1888,7 @@ define @intrinsic_vsaddu_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv16i8.i8( %0, @@ -1916,7 +1916,7 @@ define @intrinsic_vsaddu_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv32i8.i8( %0, @@ -1944,7 +1944,7 @@ define @intrinsic_vsaddu_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv64i8.i8( %0, @@ -1972,7 +1972,7 @@ define @intrinsic_vsaddu_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv1i16.i16( %0, @@ -2000,7 +2000,7 @@ define @intrinsic_vsaddu_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv2i16.i16( %0, @@ -2028,7 +2028,7 @@ define @intrinsic_vsaddu_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv4i16.i16( %0, @@ -2056,7 +2056,7 @@ define @intrinsic_vsaddu_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv8i16.i16( %0, @@ -2084,7 +2084,7 @@ define @intrinsic_vsaddu_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv16i16.i16( %0, @@ -2112,7 +2112,7 @@ define @intrinsic_vsaddu_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv32i16.i16( %0, @@ -2140,7 +2140,7 @@ define @intrinsic_vsaddu_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv1i32.i32( %0, @@ -2168,7 +2168,7 @@ define @intrinsic_vsaddu_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, 
{{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv2i32.i32( %0, @@ -2196,7 +2196,7 @@ define @intrinsic_vsaddu_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv4i32.i32( %0, @@ -2224,7 +2224,7 @@ define @intrinsic_vsaddu_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv8i32.i32( %0, @@ -2252,7 +2252,7 @@ define @intrinsic_vsaddu_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv16i32.i32( %0, @@ -2280,7 +2280,7 @@ define @intrinsic_vsaddu_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv1i64.i64( %0, @@ -2308,7 +2308,7 @@ define @intrinsic_vsaddu_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv2i64.i64( %0, @@ -2336,7 +2336,7 @@ define @intrinsic_vsaddu_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv4i64.i64( %0, @@ -2364,7 +2364,7 @@ define @intrinsic_vsaddu_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsaddu.mask.nxv8i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll @@ -31,7 +31,7 @@ define @intrinsic_vslide1down_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -75,7 +75,7 @@ define @intrinsic_vslide1down_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -119,7 +119,7 @@ define @intrinsic_vslide1down_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -163,7 +163,7 @@ define @intrinsic_vslide1down_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -207,7 +207,7 @@ define @intrinsic_vslide1down_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v18, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -251,7 +251,7 @@ define @intrinsic_vslide1down_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v20, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -297,7 +297,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e8,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e8,m8,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v8, a1, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -341,7 +341,7 @@ define @intrinsic_vslide1down_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -385,7 +385,7 @@ define @intrinsic_vslide1down_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -429,7 +429,7 @@ define @intrinsic_vslide1down_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -473,7 +473,7 @@ define @intrinsic_vslide1down_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v18, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -517,7 +517,7 @@ define 
@intrinsic_vslide1down_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v20, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -563,7 +563,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m8,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v8, a1, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -607,7 +607,7 @@ define @intrinsic_vslide1down_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -651,7 +651,7 @@ define @intrinsic_vslide1down_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -695,7 +695,7 @@ define @intrinsic_vslide1down_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v18, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -739,7 +739,7 @@ define @intrinsic_vslide1down_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v20, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -785,7 +785,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e32,m8,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v8, a1, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll @@ -31,7 +31,7 @@ define @intrinsic_vslide1down_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -75,7 +75,7 @@ define @intrinsic_vslide1down_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -119,7 +119,7 @@ define @intrinsic_vslide1down_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 
%2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -163,7 +163,7 @@ define @intrinsic_vslide1down_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -207,7 +207,7 @@ define @intrinsic_vslide1down_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v18, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -251,7 +251,7 @@ define @intrinsic_vslide1down_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v20, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -297,7 +297,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e8,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e8,m8,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v8, a1, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -341,7 +341,7 @@ define @intrinsic_vslide1down_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -385,7 +385,7 @@ define @intrinsic_vslide1down_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -429,7 +429,7 @@ define @intrinsic_vslide1down_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -473,7 +473,7 @@ define @intrinsic_vslide1down_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v18, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -517,7 +517,7 @@ define @intrinsic_vslide1down_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, 
e16,m4,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v20, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -563,7 +563,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m8,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v8, a1, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -607,7 +607,7 @@ define @intrinsic_vslide1down_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -651,7 +651,7 @@ define @intrinsic_vslide1down_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -695,7 +695,7 @@ define @intrinsic_vslide1down_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v18, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -739,7 +739,7 @@ define @intrinsic_vslide1down_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v20, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -785,7 +785,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e32,m8,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v8, a1, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -829,7 +829,7 @@ define @intrinsic_vslide1down_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -873,7 +873,7 @@ define @intrinsic_vslide1down_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v18, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -917,7 +917,7 @@ define @intrinsic_vslide1down_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v20, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -963,7 +963,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu ; CHECK-NEXT: vle64.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e64,m8,ta,mu 
+; CHECK-NEXT: vsetvli a0, a2, e64,m8,tu,mu ; CHECK-NEXT: vslide1down.vx v16, v8, a1, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll @@ -32,7 +32,7 @@ define @intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vslide1up.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -77,7 +77,7 @@ define @intrinsic_vslide1up_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vslide1up.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -122,7 +122,7 @@ define @intrinsic_vslide1up_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vslide1up.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -167,7 +167,7 @@ define @intrinsic_vslide1up_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vslide1up.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -212,7 +212,7 @@ define @intrinsic_vslide1up_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vslide1up.vx v16, v18, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -257,7 +257,7 @@ define @intrinsic_vslide1up_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu ; CHECK-NEXT: vslide1up.vx v16, v20, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -304,7 +304,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e8,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e8,m8,tu,mu ; CHECK-NEXT: vslide1up.vx v16, v8, a1, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -349,7 +349,7 @@ define @intrinsic_vslide1up_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vslide1up.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -394,7 +394,7 @@ define @intrinsic_vslide1up_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: 
vslide1up.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -439,7 +439,7 @@ define @intrinsic_vslide1up_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vslide1up.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -484,7 +484,7 @@ define @intrinsic_vslide1up_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vslide1up.vx v16, v18, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -529,7 +529,7 @@ define @intrinsic_vslide1up_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu ; CHECK-NEXT: vslide1up.vx v16, v20, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -576,7 +576,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m8,tu,mu ; CHECK-NEXT: vslide1up.vx v16, v8, a1, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -621,7 +621,7 @@ define @intrinsic_vslide1up_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu ; CHECK-NEXT: vslide1up.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -666,7 +666,7 @@ define @intrinsic_vslide1up_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu ; CHECK-NEXT: vslide1up.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -711,7 +711,7 @@ define @intrinsic_vslide1up_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu ; CHECK-NEXT: vslide1up.vx v16, v18, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -756,7 +756,7 @@ define @intrinsic_vslide1up_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu ; CHECK-NEXT: vslide1up.vx v16, v20, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -803,7 +803,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e32,m8,tu,mu ; CHECK-NEXT: vslide1up.vx v16, v8, a1, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -848,7 +848,7 @@ define @intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, 
e64,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu ; CHECK-NEXT: vslide1up.vx v16, v17, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -893,7 +893,7 @@ define @intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu ; CHECK-NEXT: vslide1up.vx v16, v18, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -938,7 +938,7 @@ define @intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu ; CHECK-NEXT: vslide1up.vx v16, v20, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -985,7 +985,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu ; CHECK-NEXT: vle64.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e64,m8,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e64,m8,tu,mu ; CHECK-NEXT: vslide1up.vx v16, v8, a1, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll @@ -9,7 +9,7 @@ define @intrinsic_vslidedown_vx_nxv1i8_nxv1i8( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv1i8( %0, @@ -30,7 +30,7 @@ define @intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv1i8( %0, @@ -45,7 +45,7 @@ define @intrinsic_vslidedown_vi_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv1i8( %0, @@ -59,7 +59,7 @@ define @intrinsic_vslidedown_mask_vi_nxv1i8_nxv1i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv1i8( %0, @@ -80,7 +80,7 @@ define @intrinsic_vslidedown_vx_nxv2i8_nxv2i8( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv2i8( %0, @@ -101,7 +101,7 @@ define @intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vslidedown.vx 
{{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv2i8( %0, @@ -116,7 +116,7 @@ define @intrinsic_vslidedown_vi_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv2i8( %0, @@ -130,7 +130,7 @@ define @intrinsic_vslidedown_mask_vi_nxv2i8_nxv2i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv2i8( %0, @@ -151,7 +151,7 @@ define @intrinsic_vslidedown_vx_nxv4i8_nxv4i8( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv4i8( %0, @@ -172,7 +172,7 @@ define @intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv4i8( %0, @@ -187,7 +187,7 @@ define @intrinsic_vslidedown_vi_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv4i8( %0, @@ -201,7 +201,7 @@ define @intrinsic_vslidedown_mask_vi_nxv4i8_nxv4i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv4i8( %0, @@ -222,7 +222,7 @@ define @intrinsic_vslidedown_vx_nxv8i8_nxv8i8( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv8i8( %0, @@ -243,7 +243,7 @@ define @intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv8i8( %0, @@ -258,7 +258,7 @@ define @intrinsic_vslidedown_vi_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv8i8( %0, @@ -272,7 +272,7 @@ define @intrinsic_vslidedown_mask_vi_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vslidedown_mask_vi_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv8i8( %0, @@ -293,7 +293,7 @@ define @intrinsic_vslidedown_vx_nxv16i8_nxv16i8( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv16i8( %0, @@ -314,7 +314,7 @@ define @intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv16i8( %0, @@ -329,7 +329,7 @@ define @intrinsic_vslidedown_vi_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv16i8( %0, @@ -343,7 +343,7 @@ define @intrinsic_vslidedown_mask_vi_nxv16i8_nxv16i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv16i8( %0, @@ -364,7 +364,7 @@ define @intrinsic_vslidedown_vx_nxv32i8_nxv32i8( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv32i8( %0, @@ -385,7 +385,7 @@ define @intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv32i8( %0, @@ -400,7 +400,7 @@ define @intrinsic_vslidedown_vi_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv32i8( %0, @@ -414,7 +414,7 @@ define @intrinsic_vslidedown_mask_vi_nxv32i8_nxv32i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv32i8( %0, @@ -435,7 +435,7 @@ define @intrinsic_vslidedown_vx_nxv1i16_nxv1i16( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, 
{{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv1i16( %0, @@ -456,7 +456,7 @@ define @intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv1i16( %0, @@ -471,7 +471,7 @@ define @intrinsic_vslidedown_vi_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv1i16( %0, @@ -485,7 +485,7 @@ define @intrinsic_vslidedown_mask_vi_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv1i16( %0, @@ -506,7 +506,7 @@ define @intrinsic_vslidedown_vx_nxv2i16_nxv2i16( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv2i16( %0, @@ -527,7 +527,7 @@ define @intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv2i16( %0, @@ -542,7 +542,7 @@ define @intrinsic_vslidedown_vi_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv2i16( %0, @@ -556,7 +556,7 @@ define @intrinsic_vslidedown_mask_vi_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv2i16( %0, @@ -577,7 +577,7 @@ define @intrinsic_vslidedown_vx_nxv4i16_nxv4i16( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv4i16( %0, @@ -598,7 +598,7 @@ define @intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv4i16( %0, @@ -613,7 +613,7 @@ define @intrinsic_vslidedown_vi_nxv4i16_nxv4i16( %0, %1, 
i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv4i16( %0, @@ -627,7 +627,7 @@ define @intrinsic_vslidedown_mask_vi_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv4i16( %0, @@ -648,7 +648,7 @@ define @intrinsic_vslidedown_vx_nxv8i16_nxv8i16( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv8i16( %0, @@ -669,7 +669,7 @@ define @intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv8i16( %0, @@ -684,7 +684,7 @@ define @intrinsic_vslidedown_vi_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv8i16( %0, @@ -698,7 +698,7 @@ define @intrinsic_vslidedown_mask_vi_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv8i16( %0, @@ -719,7 +719,7 @@ define @intrinsic_vslidedown_vx_nxv16i16_nxv16i16( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv16i16( %0, @@ -740,7 +740,7 @@ define @intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv16i16( %0, @@ -755,7 +755,7 @@ define @intrinsic_vslidedown_vi_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv16i16( %0, @@ -769,7 +769,7 @@ define @intrinsic_vslidedown_mask_vi_nxv16i16_nxv16i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv16i16( %0, @@ -790,7 +790,7 @@ define @intrinsic_vslidedown_vx_nxv1i32_nxv1i32( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv1i32( %0, @@ -811,7 +811,7 @@ define @intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv1i32( %0, @@ -826,7 +826,7 @@ define @intrinsic_vslidedown_vi_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv1i32( %0, @@ -840,7 +840,7 @@ define @intrinsic_vslidedown_mask_vi_nxv1i32_nxv1i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv1i32( %0, @@ -861,7 +861,7 @@ define @intrinsic_vslidedown_vx_nxv2i32_nxv2i32( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv2i32( %0, @@ -882,7 +882,7 @@ define @intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv2i32( %0, @@ -897,7 +897,7 @@ define @intrinsic_vslidedown_vi_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv2i32( %0, @@ -911,7 +911,7 @@ define @intrinsic_vslidedown_mask_vi_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv2i32( %0, @@ -932,7 +932,7 @@ define @intrinsic_vslidedown_vx_nxv4i32_nxv4i32( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv4i32( %0, @@ -953,7 +953,7 @@ define 
@intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv4i32( %0, @@ -968,7 +968,7 @@ define @intrinsic_vslidedown_vi_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv4i32( %0, @@ -982,7 +982,7 @@ define @intrinsic_vslidedown_mask_vi_nxv4i32_nxv4i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv4i32( %0, @@ -1003,7 +1003,7 @@ define @intrinsic_vslidedown_vx_nxv8i32_nxv8i32( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv8i32( %0, @@ -1024,7 +1024,7 @@ define @intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv8i32( %0, @@ -1039,7 +1039,7 @@ define @intrinsic_vslidedown_vi_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv8i32( %0, @@ -1053,7 +1053,7 @@ define @intrinsic_vslidedown_mask_vi_nxv8i32_nxv8i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv8i32( %0, @@ -1074,7 +1074,7 @@ define @intrinsic_vslidedown_vx_nxv1f16_nxv1f16( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv1f16( %0, @@ -1095,7 +1095,7 @@ define @intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv1f16( %0, @@ -1110,7 +1110,7 @@ define @intrinsic_vslidedown_vi_nxv1f16_nxv1f16( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f16_nxv1f16 -; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv1f16( %0, @@ -1124,7 +1124,7 @@ define @intrinsic_vslidedown_mask_vi_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv1f16( %0, @@ -1145,7 +1145,7 @@ define @intrinsic_vslidedown_vx_nxv2f16_nxv2f16( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv2f16( %0, @@ -1166,7 +1166,7 @@ define @intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv2f16( %0, @@ -1181,7 +1181,7 @@ define @intrinsic_vslidedown_vi_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv2f16( %0, @@ -1195,7 +1195,7 @@ define @intrinsic_vslidedown_mask_vi_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv2f16( %0, @@ -1216,7 +1216,7 @@ define @intrinsic_vslidedown_vx_nxv4f16_nxv4f16( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv4f16( %0, @@ -1237,7 +1237,7 @@ define @intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv4f16( %0, @@ -1252,7 +1252,7 @@ define @intrinsic_vslidedown_vi_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv4f16( %0, @@ -1266,7 +1266,7 @@ define @intrinsic_vslidedown_mask_vi_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, 
{{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv4f16( %0, @@ -1287,7 +1287,7 @@ define @intrinsic_vslidedown_vx_nxv8f16_nxv8f16( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv8f16( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv8f16( %0, @@ -1323,7 +1323,7 @@ define @intrinsic_vslidedown_vi_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv8f16( %0, @@ -1337,7 +1337,7 @@ define @intrinsic_vslidedown_mask_vi_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv8f16( %0, @@ -1358,7 +1358,7 @@ define @intrinsic_vslidedown_vx_nxv16f16_nxv16f16( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv16f16( %0, @@ -1379,7 +1379,7 @@ define @intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv16f16( %0, @@ -1394,7 +1394,7 @@ define @intrinsic_vslidedown_vi_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv16f16( %0, @@ -1408,7 +1408,7 @@ define @intrinsic_vslidedown_mask_vi_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv16f16( %0, @@ -1429,7 +1429,7 @@ define @intrinsic_vslidedown_vx_nxv1f32_nxv1f32( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv1f32( %0, @@ -1450,7 +1450,7 @@ define 
@intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv1f32( %0, @@ -1465,7 +1465,7 @@ define @intrinsic_vslidedown_vi_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv1f32( %0, @@ -1479,7 +1479,7 @@ define @intrinsic_vslidedown_mask_vi_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv1f32( %0, @@ -1500,7 +1500,7 @@ define @intrinsic_vslidedown_vx_nxv2f32_nxv2f32( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv2f32( %0, @@ -1521,7 +1521,7 @@ define @intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv2f32( %0, @@ -1536,7 +1536,7 @@ define @intrinsic_vslidedown_vi_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv2f32( %0, @@ -1550,7 +1550,7 @@ define @intrinsic_vslidedown_mask_vi_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv2f32( %0, @@ -1571,7 +1571,7 @@ define @intrinsic_vslidedown_vx_nxv4f32_nxv4f32( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv4f32( %0, @@ -1592,7 +1592,7 @@ define @intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv4f32( %0, @@ -1607,7 +1607,7 @@ define @intrinsic_vslidedown_vi_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vslidedown_vi_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv4f32( %0, @@ -1621,7 +1621,7 @@ define @intrinsic_vslidedown_mask_vi_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv4f32( %0, @@ -1642,7 +1642,7 @@ define @intrinsic_vslidedown_vx_nxv8f32_nxv8f32( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv8f32( %0, @@ -1663,7 +1663,7 @@ define @intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv8f32( %0, @@ -1678,7 +1678,7 @@ define @intrinsic_vslidedown_vi_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv8f32( %0, @@ -1692,7 +1692,7 @@ define @intrinsic_vslidedown_mask_vi_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv8f32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll @@ -9,7 +9,7 @@ define @intrinsic_vslidedown_vx_nxv1i8_nxv1i8( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv1i8( %0, @@ -30,7 +30,7 @@ define @intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv1i8( %0, @@ -45,7 +45,7 @@ define @intrinsic_vslidedown_vi_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv1i8( %0, @@ -59,7 +59,7 @@ define @intrinsic_vslidedown_mask_vi_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { 
entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv1i8( %0, @@ -80,7 +80,7 @@ define @intrinsic_vslidedown_vx_nxv2i8_nxv2i8( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv2i8( %0, @@ -101,7 +101,7 @@ define @intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv2i8( %0, @@ -116,7 +116,7 @@ define @intrinsic_vslidedown_vi_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv2i8( %0, @@ -130,7 +130,7 @@ define @intrinsic_vslidedown_mask_vi_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv2i8( %0, @@ -151,7 +151,7 @@ define @intrinsic_vslidedown_vx_nxv4i8_nxv4i8( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv4i8( %0, @@ -172,7 +172,7 @@ define @intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv4i8( %0, @@ -187,7 +187,7 @@ define @intrinsic_vslidedown_vi_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv4i8( %0, @@ -201,7 +201,7 @@ define @intrinsic_vslidedown_mask_vi_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv4i8( %0, @@ -222,7 +222,7 @@ define @intrinsic_vslidedown_vx_nxv8i8_nxv8i8( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, 
a0 %a = call @llvm.riscv.vslidedown.nxv8i8( %0, @@ -243,7 +243,7 @@ define @intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv8i8( %0, @@ -258,7 +258,7 @@ define @intrinsic_vslidedown_vi_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv8i8( %0, @@ -272,7 +272,7 @@ define @intrinsic_vslidedown_mask_vi_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv8i8( %0, @@ -293,7 +293,7 @@ define @intrinsic_vslidedown_vx_nxv16i8_nxv16i8( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv16i8( %0, @@ -314,7 +314,7 @@ define @intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv16i8( %0, @@ -329,7 +329,7 @@ define @intrinsic_vslidedown_vi_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv16i8( %0, @@ -343,7 +343,7 @@ define @intrinsic_vslidedown_mask_vi_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv16i8( %0, @@ -364,7 +364,7 @@ define @intrinsic_vslidedown_vx_nxv32i8_nxv32i8( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv32i8( %0, @@ -385,7 +385,7 @@ define @intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv32i8( %0, @@ -400,7 +400,7 @@ define @intrinsic_vslidedown_vi_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vslidedown_vi_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv32i8( %0, @@ -414,7 +414,7 @@ define @intrinsic_vslidedown_mask_vi_nxv32i8_nxv32i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv32i8( %0, @@ -435,7 +435,7 @@ define @intrinsic_vslidedown_vx_nxv1i16_nxv1i16( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv1i16( %0, @@ -456,7 +456,7 @@ define @intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv1i16( %0, @@ -471,7 +471,7 @@ define @intrinsic_vslidedown_vi_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv1i16( %0, @@ -485,7 +485,7 @@ define @intrinsic_vslidedown_mask_vi_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv1i16( %0, @@ -506,7 +506,7 @@ define @intrinsic_vslidedown_vx_nxv2i16_nxv2i16( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv2i16( %0, @@ -527,7 +527,7 @@ define @intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv2i16( %0, @@ -542,7 +542,7 @@ define @intrinsic_vslidedown_vi_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv2i16( %0, @@ -556,7 +556,7 @@ define @intrinsic_vslidedown_mask_vi_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: 
vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv2i16( %0, @@ -577,7 +577,7 @@ define @intrinsic_vslidedown_vx_nxv4i16_nxv4i16( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv4i16( %0, @@ -598,7 +598,7 @@ define @intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv4i16( %0, @@ -613,7 +613,7 @@ define @intrinsic_vslidedown_vi_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv4i16( %0, @@ -627,7 +627,7 @@ define @intrinsic_vslidedown_mask_vi_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv4i16( %0, @@ -648,7 +648,7 @@ define @intrinsic_vslidedown_vx_nxv8i16_nxv8i16( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv8i16( %0, @@ -669,7 +669,7 @@ define @intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv8i16( %0, @@ -684,7 +684,7 @@ define @intrinsic_vslidedown_vi_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv8i16( %0, @@ -698,7 +698,7 @@ define @intrinsic_vslidedown_mask_vi_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv8i16( %0, @@ -719,7 +719,7 @@ define @intrinsic_vslidedown_vx_nxv16i16_nxv16i16( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv16i16( %0, @@ -740,7 +740,7 @@ define 
@intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv16i16( %0, @@ -755,7 +755,7 @@ define @intrinsic_vslidedown_vi_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv16i16( %0, @@ -769,7 +769,7 @@ define @intrinsic_vslidedown_mask_vi_nxv16i16_nxv16i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv16i16( %0, @@ -790,7 +790,7 @@ define @intrinsic_vslidedown_vx_nxv1i32_nxv1i32( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv1i32( %0, @@ -811,7 +811,7 @@ define @intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv1i32( %0, @@ -826,7 +826,7 @@ define @intrinsic_vslidedown_vi_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv1i32( %0, @@ -840,7 +840,7 @@ define @intrinsic_vslidedown_mask_vi_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv1i32( %0, @@ -861,7 +861,7 @@ define @intrinsic_vslidedown_vx_nxv2i32_nxv2i32( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv2i32( %0, @@ -882,7 +882,7 @@ define @intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv2i32( %0, @@ -897,7 +897,7 @@ define @intrinsic_vslidedown_vi_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i32_nxv2i32 
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv2i32( %0, @@ -911,7 +911,7 @@ define @intrinsic_vslidedown_mask_vi_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv2i32( %0, @@ -932,7 +932,7 @@ define @intrinsic_vslidedown_vx_nxv4i32_nxv4i32( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv4i32( %0, @@ -953,7 +953,7 @@ define @intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv4i32( %0, @@ -968,7 +968,7 @@ define @intrinsic_vslidedown_vi_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv4i32( %0, @@ -982,7 +982,7 @@ define @intrinsic_vslidedown_mask_vi_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv4i32( %0, @@ -1003,7 +1003,7 @@ define @intrinsic_vslidedown_vx_nxv8i32_nxv8i32( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv8i32( %0, @@ -1024,7 +1024,7 @@ define @intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv8i32( %0, @@ -1039,7 +1039,7 @@ define @intrinsic_vslidedown_vi_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv8i32( %0, @@ -1053,7 +1053,7 @@ define @intrinsic_vslidedown_mask_vi_nxv8i32_nxv8i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = 
call @llvm.riscv.vslidedown.mask.nxv8i32( %0, @@ -1074,7 +1074,7 @@ define @intrinsic_vslidedown_vx_nxv1i64_nxv1i64( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv1i64( %0, @@ -1095,7 +1095,7 @@ define @intrinsic_vslidedown_mask_vx_nxv1i64_nxv1i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv1i64( %0, @@ -1110,7 +1110,7 @@ define @intrinsic_vslidedown_vi_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv1i64( %0, @@ -1124,7 +1124,7 @@ define @intrinsic_vslidedown_mask_vi_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv1i64( %0, @@ -1145,7 +1145,7 @@ define @intrinsic_vslidedown_vx_nxv2i64_nxv2i64( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv2i64( %0, @@ -1166,7 +1166,7 @@ define @intrinsic_vslidedown_mask_vx_nxv2i64_nxv2i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv2i64( %0, @@ -1181,7 +1181,7 @@ define @intrinsic_vslidedown_vi_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv2i64( %0, @@ -1195,7 +1195,7 @@ define @intrinsic_vslidedown_mask_vi_nxv2i64_nxv2i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv2i64( %0, @@ -1216,7 +1216,7 @@ define @intrinsic_vslidedown_vx_nxv4i64_nxv4i64( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv4i64( %0, @@ -1237,7 +1237,7 @@ define @intrinsic_vslidedown_mask_vx_nxv4i64_nxv4i64( %0, %1, i64 %2, %3, i64 %4) nounwind { 
entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv4i64( %0, @@ -1252,7 +1252,7 @@ define @intrinsic_vslidedown_vi_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv4i64( %0, @@ -1266,7 +1266,7 @@ define @intrinsic_vslidedown_mask_vi_nxv4i64_nxv4i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv4i64( %0, @@ -1287,7 +1287,7 @@ define @intrinsic_vslidedown_vx_nxv1f16_nxv1f16( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv1f16( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv1f16( %0, @@ -1323,7 +1323,7 @@ define @intrinsic_vslidedown_vi_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv1f16( %0, @@ -1337,7 +1337,7 @@ define @intrinsic_vslidedown_mask_vi_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv1f16( %0, @@ -1358,7 +1358,7 @@ define @intrinsic_vslidedown_vx_nxv2f16_nxv2f16( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv2f16( %0, @@ -1379,7 +1379,7 @@ define @intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv2f16( %0, @@ -1394,7 +1394,7 @@ define @intrinsic_vslidedown_vi_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv2f16( %0, @@ -1408,7 +1408,7 @@ define @intrinsic_vslidedown_mask_vi_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv2f16( %0, @@ -1429,7 +1429,7 @@ define @intrinsic_vslidedown_vx_nxv4f16_nxv4f16( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv4f16( %0, @@ -1450,7 +1450,7 @@ define @intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv4f16( %0, @@ -1465,7 +1465,7 @@ define @intrinsic_vslidedown_vi_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv4f16( %0, @@ -1479,7 +1479,7 @@ define @intrinsic_vslidedown_mask_vi_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv4f16( %0, @@ -1500,7 +1500,7 @@ define @intrinsic_vslidedown_vx_nxv8f16_nxv8f16( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv8f16( %0, @@ -1521,7 +1521,7 @@ define @intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv8f16( %0, @@ -1536,7 +1536,7 @@ define @intrinsic_vslidedown_vi_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv8f16( %0, @@ -1550,7 +1550,7 @@ define @intrinsic_vslidedown_mask_vi_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv8f16( %0, @@ -1571,7 +1571,7 
@@ define @intrinsic_vslidedown_vx_nxv16f16_nxv16f16( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv16f16( %0, @@ -1592,7 +1592,7 @@ define @intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv16f16( %0, @@ -1607,7 +1607,7 @@ define @intrinsic_vslidedown_vi_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv16f16( %0, @@ -1621,7 +1621,7 @@ define @intrinsic_vslidedown_mask_vi_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv16f16( %0, @@ -1642,7 +1642,7 @@ define @intrinsic_vslidedown_vx_nxv1f32_nxv1f32( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv1f32( %0, @@ -1663,7 +1663,7 @@ define @intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv1f32( %0, @@ -1678,7 +1678,7 @@ define @intrinsic_vslidedown_vi_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv1f32( %0, @@ -1692,7 +1692,7 @@ define @intrinsic_vslidedown_mask_vi_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv1f32( %0, @@ -1713,7 +1713,7 @@ define @intrinsic_vslidedown_vx_nxv2f32_nxv2f32( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv2f32( %0, @@ -1734,7 +1734,7 @@ define @intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv2f32( %0, @@ -1749,7 +1749,7 @@ define @intrinsic_vslidedown_vi_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv2f32( %0, @@ -1763,7 +1763,7 @@ define @intrinsic_vslidedown_mask_vi_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv2f32( %0, @@ -1784,7 +1784,7 @@ define @intrinsic_vslidedown_vx_nxv4f32_nxv4f32( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv4f32( %0, @@ -1805,7 +1805,7 @@ define @intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv4f32( %0, @@ -1820,7 +1820,7 @@ define @intrinsic_vslidedown_vi_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv4f32( %0, @@ -1834,7 +1834,7 @@ define @intrinsic_vslidedown_mask_vi_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv4f32( %0, @@ -1855,7 +1855,7 @@ define @intrinsic_vslidedown_vx_nxv8f32_nxv8f32( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv8f32( %0, @@ -1876,7 +1876,7 @@ define @intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv8f32( %0, @@ -1891,7 +1891,7 @@ define @intrinsic_vslidedown_vi_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: 
vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv8f32( %0, @@ -1905,7 +1905,7 @@ define @intrinsic_vslidedown_mask_vi_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv8f32( %0, @@ -1926,7 +1926,7 @@ define @intrinsic_vslidedown_vx_nxv1f64_nxv1f64( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f64_nxv1f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv1f64( %0, @@ -1947,7 +1947,7 @@ define @intrinsic_vslidedown_mask_vx_nxv1f64_nxv1f64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f64_nxv1f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv1f64( %0, @@ -1962,7 +1962,7 @@ define @intrinsic_vslidedown_vi_nxv1f64_nxv1f64( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f64_nxv1f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv1f64( %0, @@ -1976,7 +1976,7 @@ define @intrinsic_vslidedown_mask_vi_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1f64_nxv1f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv1f64( %0, @@ -1997,7 +1997,7 @@ define @intrinsic_vslidedown_vx_nxv2f64_nxv2f64( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f64_nxv2f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv2f64( %0, @@ -2018,7 +2018,7 @@ define @intrinsic_vslidedown_mask_vx_nxv2f64_nxv2f64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f64_nxv2f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv2f64( %0, @@ -2033,7 +2033,7 @@ define @intrinsic_vslidedown_vi_nxv2f64_nxv2f64( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f64_nxv2f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv2f64( %0, @@ -2047,7 +2047,7 @@ define @intrinsic_vslidedown_mask_vi_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2f64_nxv2f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv2f64( %0, @@ -2068,7 +2068,7 @@ define 
@intrinsic_vslidedown_vx_nxv4f64_nxv4f64( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f64_nxv4f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslidedown.nxv4f64( %0, @@ -2089,7 +2089,7 @@ define @intrinsic_vslidedown_mask_vx_nxv4f64_nxv4f64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f64_nxv4f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv4f64( %0, @@ -2104,7 +2104,7 @@ define @intrinsic_vslidedown_vi_nxv4f64_nxv4f64( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f64_nxv4f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslidedown.nxv4f64( %0, @@ -2118,7 +2118,7 @@ define @intrinsic_vslidedown_mask_vi_nxv4f64_nxv4f64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4f64_nxv4f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslidedown.mask.nxv4f64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll @@ -9,7 +9,7 @@ define @intrinsic_vslideup_vx_nxv1i8_nxv1i8( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv1i8( %0, @@ -30,7 +30,7 @@ define @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv1i8( %0, @@ -45,7 +45,7 @@ define @intrinsic_vslideup_vi_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv1i8( %0, @@ -59,7 +59,7 @@ define @intrinsic_vslideup_mask_vi_nxv1i8_nxv1i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv1i8( %0, @@ -80,7 +80,7 @@ define @intrinsic_vslideup_vx_nxv2i8_nxv2i8( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv2i8( %0, @@ -101,7 +101,7 @@ define @intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8( 
%0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv2i8( %0, @@ -116,7 +116,7 @@ define @intrinsic_vslideup_vi_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv2i8( %0, @@ -130,7 +130,7 @@ define @intrinsic_vslideup_mask_vi_nxv2i8_nxv2i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv2i8( %0, @@ -151,7 +151,7 @@ define @intrinsic_vslideup_vx_nxv4i8_nxv4i8( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv4i8( %0, @@ -172,7 +172,7 @@ define @intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv4i8( %0, @@ -187,7 +187,7 @@ define @intrinsic_vslideup_vi_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv4i8( %0, @@ -201,7 +201,7 @@ define @intrinsic_vslideup_mask_vi_nxv4i8_nxv4i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv4i8( %0, @@ -222,7 +222,7 @@ define @intrinsic_vslideup_vx_nxv8i8_nxv8i8( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv8i8( %0, @@ -243,7 +243,7 @@ define @intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv8i8( %0, @@ -258,7 +258,7 @@ define @intrinsic_vslideup_vi_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv8i8( %0, 
@@ -272,7 +272,7 @@ define @intrinsic_vslideup_mask_vi_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv8i8( %0, @@ -293,7 +293,7 @@ define @intrinsic_vslideup_vx_nxv16i8_nxv16i8( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv16i8( %0, @@ -314,7 +314,7 @@ define @intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv16i8( %0, @@ -329,7 +329,7 @@ define @intrinsic_vslideup_vi_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv16i8( %0, @@ -343,7 +343,7 @@ define @intrinsic_vslideup_mask_vi_nxv16i8_nxv16i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv16i8( %0, @@ -364,7 +364,7 @@ define @intrinsic_vslideup_vx_nxv32i8_nxv32i8( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv32i8( %0, @@ -385,7 +385,7 @@ define @intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv32i8( %0, @@ -400,7 +400,7 @@ define @intrinsic_vslideup_vi_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv32i8( %0, @@ -414,7 +414,7 @@ define @intrinsic_vslideup_mask_vi_nxv32i8_nxv32i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv32i8( %0, @@ -435,7 +435,7 @@ define @intrinsic_vslideup_vx_nxv1i16_nxv1i16( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e16,mf4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv1i16( %0, @@ -456,7 +456,7 @@ define @intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv1i16( %0, @@ -471,7 +471,7 @@ define @intrinsic_vslideup_vi_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv1i16( %0, @@ -485,7 +485,7 @@ define @intrinsic_vslideup_mask_vi_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv1i16( %0, @@ -506,7 +506,7 @@ define @intrinsic_vslideup_vx_nxv2i16_nxv2i16( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv2i16( %0, @@ -527,7 +527,7 @@ define @intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv2i16( %0, @@ -542,7 +542,7 @@ define @intrinsic_vslideup_vi_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv2i16( %0, @@ -556,7 +556,7 @@ define @intrinsic_vslideup_mask_vi_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv2i16( %0, @@ -577,7 +577,7 @@ define @intrinsic_vslideup_vx_nxv4i16_nxv4i16( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv4i16( %0, @@ -598,7 +598,7 @@ define @intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv4i16( %0, @@ -613,7 +613,7 @@ define @intrinsic_vslideup_vi_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { entry: ; 
CHECK-LABEL: intrinsic_vslideup_vi_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv4i16( %0, @@ -627,7 +627,7 @@ define @intrinsic_vslideup_mask_vi_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv4i16( %0, @@ -648,7 +648,7 @@ define @intrinsic_vslideup_vx_nxv8i16_nxv8i16( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv8i16( %0, @@ -669,7 +669,7 @@ define @intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv8i16( %0, @@ -684,7 +684,7 @@ define @intrinsic_vslideup_vi_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv8i16( %0, @@ -698,7 +698,7 @@ define @intrinsic_vslideup_mask_vi_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv8i16( %0, @@ -719,7 +719,7 @@ define @intrinsic_vslideup_vx_nxv16i16_nxv16i16( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv16i16( %0, @@ -740,7 +740,7 @@ define @intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv16i16( %0, @@ -755,7 +755,7 @@ define @intrinsic_vslideup_vi_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv16i16( %0, @@ -769,7 +769,7 @@ define @intrinsic_vslideup_mask_vi_nxv16i16_nxv16i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call 
@llvm.riscv.vslideup.mask.nxv16i16( %0, @@ -790,7 +790,7 @@ define @intrinsic_vslideup_vx_nxv1i32_nxv1i32( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv1i32( %0, @@ -811,7 +811,7 @@ define @intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv1i32( %0, @@ -826,7 +826,7 @@ define @intrinsic_vslideup_vi_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv1i32( %0, @@ -840,7 +840,7 @@ define @intrinsic_vslideup_mask_vi_nxv1i32_nxv1i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv1i32( %0, @@ -861,7 +861,7 @@ define @intrinsic_vslideup_vx_nxv2i32_nxv2i32( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv2i32( %0, @@ -882,7 +882,7 @@ define @intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv2i32( %0, @@ -897,7 +897,7 @@ define @intrinsic_vslideup_vi_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv2i32( %0, @@ -911,7 +911,7 @@ define @intrinsic_vslideup_mask_vi_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv2i32( %0, @@ -932,7 +932,7 @@ define @intrinsic_vslideup_vx_nxv4i32_nxv4i32( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv4i32( %0, @@ -953,7 +953,7 @@ define @intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv4i32( %0, @@ -968,7 +968,7 @@ define @intrinsic_vslideup_vi_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv4i32( %0, @@ -982,7 +982,7 @@ define @intrinsic_vslideup_mask_vi_nxv4i32_nxv4i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv4i32( %0, @@ -1003,7 +1003,7 @@ define @intrinsic_vslideup_vx_nxv8i32_nxv8i32( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv8i32( %0, @@ -1024,7 +1024,7 @@ define @intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv8i32( %0, @@ -1039,7 +1039,7 @@ define @intrinsic_vslideup_vi_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv8i32( %0, @@ -1053,7 +1053,7 @@ define @intrinsic_vslideup_mask_vi_nxv8i32_nxv8i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv8i32( %0, @@ -1074,7 +1074,7 @@ define @intrinsic_vslideup_vx_nxv1f16_nxv1f16( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv1f16( %0, @@ -1095,7 +1095,7 @@ define @intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv1f16( %0, @@ -1110,7 +1110,7 @@ define @intrinsic_vslideup_vi_nxv1f16_nxv1f16( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv1f16( %0, @@ -1124,7 +1124,7 @@ define 
@intrinsic_vslideup_mask_vi_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv1f16( %0, @@ -1145,7 +1145,7 @@ define @intrinsic_vslideup_vx_nxv2f16_nxv2f16( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv2f16( %0, @@ -1166,7 +1166,7 @@ define @intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv2f16( %0, @@ -1181,7 +1181,7 @@ define @intrinsic_vslideup_vi_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv2f16( %0, @@ -1195,7 +1195,7 @@ define @intrinsic_vslideup_mask_vi_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv2f16( %0, @@ -1216,7 +1216,7 @@ define @intrinsic_vslideup_vx_nxv4f16_nxv4f16( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv4f16( %0, @@ -1237,7 +1237,7 @@ define @intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv4f16( %0, @@ -1252,7 +1252,7 @@ define @intrinsic_vslideup_vi_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv4f16( %0, @@ -1266,7 +1266,7 @@ define @intrinsic_vslideup_mask_vi_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv4f16( %0, @@ -1287,7 +1287,7 @@ define @intrinsic_vslideup_vx_nxv8f16_nxv8f16( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv8f16( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv8f16( %0, @@ -1323,7 +1323,7 @@ define @intrinsic_vslideup_vi_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv8f16( %0, @@ -1337,7 +1337,7 @@ define @intrinsic_vslideup_mask_vi_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv8f16( %0, @@ -1358,7 +1358,7 @@ define @intrinsic_vslideup_vx_nxv16f16_nxv16f16( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv16f16( %0, @@ -1379,7 +1379,7 @@ define @intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv16f16( %0, @@ -1394,7 +1394,7 @@ define @intrinsic_vslideup_vi_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv16f16( %0, @@ -1408,7 +1408,7 @@ define @intrinsic_vslideup_mask_vi_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv16f16( %0, @@ -1429,7 +1429,7 @@ define @intrinsic_vslideup_vx_nxv1f32_nxv1f32( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv1f32( %0, @@ -1450,7 +1450,7 @@ define @intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv1f32( %0, @@ -1465,7 +1465,7 @@ define 
@intrinsic_vslideup_vi_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv1f32( %0, @@ -1479,7 +1479,7 @@ define @intrinsic_vslideup_mask_vi_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv1f32( %0, @@ -1500,7 +1500,7 @@ define @intrinsic_vslideup_vx_nxv2f32_nxv2f32( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv2f32( %0, @@ -1521,7 +1521,7 @@ define @intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv2f32( %0, @@ -1536,7 +1536,7 @@ define @intrinsic_vslideup_vi_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv2f32( %0, @@ -1550,7 +1550,7 @@ define @intrinsic_vslideup_mask_vi_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv2f32( %0, @@ -1571,7 +1571,7 @@ define @intrinsic_vslideup_vx_nxv4f32_nxv4f32( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv4f32( %0, @@ -1592,7 +1592,7 @@ define @intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv4f32( %0, @@ -1607,7 +1607,7 @@ define @intrinsic_vslideup_vi_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv4f32( %0, @@ -1621,7 +1621,7 @@ define @intrinsic_vslideup_mask_vi_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e32,m2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv4f32( %0, @@ -1642,7 +1642,7 @@ define @intrinsic_vslideup_vx_nxv8f32_nxv8f32( %0, %1, i32 %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv8f32( %0, @@ -1663,7 +1663,7 @@ define @intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv8f32( %0, @@ -1678,7 +1678,7 @@ define @intrinsic_vslideup_vi_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv8f32( %0, @@ -1692,7 +1692,7 @@ define @intrinsic_vslideup_mask_vi_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv8f32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll @@ -9,7 +9,7 @@ define @intrinsic_vslideup_vx_nxv1i8_nxv1i8( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv1i8( %0, @@ -30,7 +30,7 @@ define @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv1i8( %0, @@ -45,7 +45,7 @@ define @intrinsic_vslideup_vi_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv1i8( %0, @@ -59,7 +59,7 @@ define @intrinsic_vslideup_mask_vi_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv1i8( %0, @@ -80,7 +80,7 @@ define @intrinsic_vslideup_vx_nxv2i8_nxv2i8( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, 
a0 %a = call @llvm.riscv.vslideup.nxv2i8( %0, @@ -101,7 +101,7 @@ define @intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv2i8( %0, @@ -116,7 +116,7 @@ define @intrinsic_vslideup_vi_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv2i8( %0, @@ -130,7 +130,7 @@ define @intrinsic_vslideup_mask_vi_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv2i8( %0, @@ -151,7 +151,7 @@ define @intrinsic_vslideup_vx_nxv4i8_nxv4i8( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv4i8( %0, @@ -172,7 +172,7 @@ define @intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv4i8( %0, @@ -187,7 +187,7 @@ define @intrinsic_vslideup_vi_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv4i8( %0, @@ -201,7 +201,7 @@ define @intrinsic_vslideup_mask_vi_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv4i8( %0, @@ -222,7 +222,7 @@ define @intrinsic_vslideup_vx_nxv8i8_nxv8i8( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv8i8( %0, @@ -243,7 +243,7 @@ define @intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv8i8( %0, @@ -258,7 +258,7 @@ define @intrinsic_vslideup_vi_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv8i8( %0, @@ -272,7 +272,7 @@ define @intrinsic_vslideup_mask_vi_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv8i8( %0, @@ -293,7 +293,7 @@ define @intrinsic_vslideup_vx_nxv16i8_nxv16i8( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv16i8( %0, @@ -314,7 +314,7 @@ define @intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv16i8( %0, @@ -329,7 +329,7 @@ define @intrinsic_vslideup_vi_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv16i8( %0, @@ -343,7 +343,7 @@ define @intrinsic_vslideup_mask_vi_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv16i8( %0, @@ -364,7 +364,7 @@ define @intrinsic_vslideup_vx_nxv32i8_nxv32i8( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv32i8( %0, @@ -385,7 +385,7 @@ define @intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv32i8( %0, @@ -400,7 +400,7 @@ define @intrinsic_vslideup_vi_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv32i8( %0, @@ -414,7 +414,7 @@ define @intrinsic_vslideup_mask_vi_nxv32i8_nxv32i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv32i8( %0, @@ -435,7 +435,7 @@ define @intrinsic_vslideup_vx_nxv1i16_nxv1i16( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vslideup_vx_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv1i16( %0, @@ -456,7 +456,7 @@ define @intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv1i16( %0, @@ -471,7 +471,7 @@ define @intrinsic_vslideup_vi_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv1i16( %0, @@ -485,7 +485,7 @@ define @intrinsic_vslideup_mask_vi_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv1i16( %0, @@ -506,7 +506,7 @@ define @intrinsic_vslideup_vx_nxv2i16_nxv2i16( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv2i16( %0, @@ -527,7 +527,7 @@ define @intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv2i16( %0, @@ -542,7 +542,7 @@ define @intrinsic_vslideup_vi_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv2i16( %0, @@ -556,7 +556,7 @@ define @intrinsic_vslideup_mask_vi_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv2i16( %0, @@ -577,7 +577,7 @@ define @intrinsic_vslideup_vx_nxv4i16_nxv4i16( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv4i16( %0, @@ -598,7 +598,7 @@ define @intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call 
@llvm.riscv.vslideup.mask.nxv4i16( %0, @@ -613,7 +613,7 @@ define @intrinsic_vslideup_vi_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv4i16( %0, @@ -627,7 +627,7 @@ define @intrinsic_vslideup_mask_vi_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv4i16( %0, @@ -648,7 +648,7 @@ define @intrinsic_vslideup_vx_nxv8i16_nxv8i16( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv8i16( %0, @@ -669,7 +669,7 @@ define @intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv8i16( %0, @@ -684,7 +684,7 @@ define @intrinsic_vslideup_vi_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv8i16( %0, @@ -698,7 +698,7 @@ define @intrinsic_vslideup_mask_vi_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv8i16( %0, @@ -719,7 +719,7 @@ define @intrinsic_vslideup_vx_nxv16i16_nxv16i16( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv16i16( %0, @@ -740,7 +740,7 @@ define @intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv16i16( %0, @@ -755,7 +755,7 @@ define @intrinsic_vslideup_vi_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv16i16( %0, @@ -769,7 +769,7 @@ define @intrinsic_vslideup_mask_vi_nxv16i16_nxv16i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv16i16( %0, @@ -790,7 +790,7 @@ define @intrinsic_vslideup_vx_nxv1i32_nxv1i32( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv1i32( %0, @@ -811,7 +811,7 @@ define @intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv1i32( %0, @@ -826,7 +826,7 @@ define @intrinsic_vslideup_vi_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv1i32( %0, @@ -840,7 +840,7 @@ define @intrinsic_vslideup_mask_vi_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv1i32( %0, @@ -861,7 +861,7 @@ define @intrinsic_vslideup_vx_nxv2i32_nxv2i32( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv2i32( %0, @@ -882,7 +882,7 @@ define @intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv2i32( %0, @@ -897,7 +897,7 @@ define @intrinsic_vslideup_vi_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv2i32( %0, @@ -911,7 +911,7 @@ define @intrinsic_vslideup_mask_vi_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv2i32( %0, @@ -932,7 +932,7 @@ define @intrinsic_vslideup_vx_nxv4i32_nxv4i32( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv4i32( %0, @@ -953,7 +953,7 @@ define 
@intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv4i32( %0, @@ -968,7 +968,7 @@ define @intrinsic_vslideup_vi_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv4i32( %0, @@ -982,7 +982,7 @@ define @intrinsic_vslideup_mask_vi_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv4i32( %0, @@ -1003,7 +1003,7 @@ define @intrinsic_vslideup_vx_nxv8i32_nxv8i32( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv8i32( %0, @@ -1024,7 +1024,7 @@ define @intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv8i32( %0, @@ -1039,7 +1039,7 @@ define @intrinsic_vslideup_vi_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv8i32( %0, @@ -1053,7 +1053,7 @@ define @intrinsic_vslideup_mask_vi_nxv8i32_nxv8i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv8i32( %0, @@ -1074,7 +1074,7 @@ define @intrinsic_vslideup_vx_nxv1i64_nxv1i64( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv1i64( %0, @@ -1095,7 +1095,7 @@ define @intrinsic_vslideup_mask_vx_nxv1i64_nxv1i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv1i64( %0, @@ -1110,7 +1110,7 @@ define @intrinsic_vslideup_vi_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv1i64( %0, @@ -1124,7 +1124,7 @@ define @intrinsic_vslideup_mask_vi_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv1i64( %0, @@ -1145,7 +1145,7 @@ define @intrinsic_vslideup_vx_nxv2i64_nxv2i64( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv2i64( %0, @@ -1166,7 +1166,7 @@ define @intrinsic_vslideup_mask_vx_nxv2i64_nxv2i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv2i64( %0, @@ -1181,7 +1181,7 @@ define @intrinsic_vslideup_vi_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv2i64( %0, @@ -1195,7 +1195,7 @@ define @intrinsic_vslideup_mask_vi_nxv2i64_nxv2i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv2i64( %0, @@ -1216,7 +1216,7 @@ define @intrinsic_vslideup_vx_nxv4i64_nxv4i64( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv4i64( %0, @@ -1237,7 +1237,7 @@ define @intrinsic_vslideup_mask_vx_nxv4i64_nxv4i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv4i64( %0, @@ -1252,7 +1252,7 @@ define @intrinsic_vslideup_vi_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv4i64( %0, @@ -1266,7 +1266,7 @@ define @intrinsic_vslideup_mask_vi_nxv4i64_nxv4i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv4i64( %0, @@ -1287,7 +1287,7 @@ define @intrinsic_vslideup_vx_nxv1f16_nxv1f16( %0, %1, i64 %2, i64 %3) 
nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv1f16( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv1f16( %0, @@ -1323,7 +1323,7 @@ define @intrinsic_vslideup_vi_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv1f16( %0, @@ -1337,7 +1337,7 @@ define @intrinsic_vslideup_mask_vi_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1f16_nxv1f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv1f16( %0, @@ -1358,7 +1358,7 @@ define @intrinsic_vslideup_vx_nxv2f16_nxv2f16( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv2f16( %0, @@ -1379,7 +1379,7 @@ define @intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv2f16( %0, @@ -1394,7 +1394,7 @@ define @intrinsic_vslideup_vi_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv2f16( %0, @@ -1408,7 +1408,7 @@ define @intrinsic_vslideup_mask_vi_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2f16_nxv2f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv2f16( %0, @@ -1429,7 +1429,7 @@ define @intrinsic_vslideup_vx_nxv4f16_nxv4f16( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv4f16( %0, @@ -1450,7 +1450,7 @@ define @intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vslideup.vx 
{{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv4f16( %0, @@ -1465,7 +1465,7 @@ define @intrinsic_vslideup_vi_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv4f16( %0, @@ -1479,7 +1479,7 @@ define @intrinsic_vslideup_mask_vi_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4f16_nxv4f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv4f16( %0, @@ -1500,7 +1500,7 @@ define @intrinsic_vslideup_vx_nxv8f16_nxv8f16( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv8f16( %0, @@ -1521,7 +1521,7 @@ define @intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv8f16( %0, @@ -1536,7 +1536,7 @@ define @intrinsic_vslideup_vi_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv8f16( %0, @@ -1550,7 +1550,7 @@ define @intrinsic_vslideup_mask_vi_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8f16_nxv8f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv8f16( %0, @@ -1571,7 +1571,7 @@ define @intrinsic_vslideup_vx_nxv16f16_nxv16f16( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv16f16( %0, @@ -1592,7 +1592,7 @@ define @intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv16f16( %0, @@ -1607,7 +1607,7 @@ define @intrinsic_vslideup_vi_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv16f16( %0, @@ -1621,7 +1621,7 @@ define @intrinsic_vslideup_mask_vi_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vslideup_mask_vi_nxv16f16_nxv16f16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv16f16( %0, @@ -1642,7 +1642,7 @@ define @intrinsic_vslideup_vx_nxv1f32_nxv1f32( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv1f32( %0, @@ -1663,7 +1663,7 @@ define @intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv1f32( %0, @@ -1678,7 +1678,7 @@ define @intrinsic_vslideup_vi_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv1f32( %0, @@ -1692,7 +1692,7 @@ define @intrinsic_vslideup_mask_vi_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1f32_nxv1f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv1f32( %0, @@ -1713,7 +1713,7 @@ define @intrinsic_vslideup_vx_nxv2f32_nxv2f32( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv2f32( %0, @@ -1734,7 +1734,7 @@ define @intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv2f32( %0, @@ -1749,7 +1749,7 @@ define @intrinsic_vslideup_vi_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv2f32( %0, @@ -1763,7 +1763,7 @@ define @intrinsic_vslideup_mask_vi_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2f32_nxv2f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv2f32( %0, @@ -1784,7 +1784,7 @@ define @intrinsic_vslideup_vx_nxv4f32_nxv4f32( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call 
@llvm.riscv.vslideup.nxv4f32( %0, @@ -1805,7 +1805,7 @@ define @intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv4f32( %0, @@ -1820,7 +1820,7 @@ define @intrinsic_vslideup_vi_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv4f32( %0, @@ -1834,7 +1834,7 @@ define @intrinsic_vslideup_mask_vi_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4f32_nxv4f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv4f32( %0, @@ -1855,7 +1855,7 @@ define @intrinsic_vslideup_vx_nxv8f32_nxv8f32( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv8f32( %0, @@ -1876,7 +1876,7 @@ define @intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv8f32( %0, @@ -1891,7 +1891,7 @@ define @intrinsic_vslideup_vi_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv8f32( %0, @@ -1905,7 +1905,7 @@ define @intrinsic_vslideup_mask_vi_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8f32_nxv8f32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv8f32( %0, @@ -1926,7 +1926,7 @@ define @intrinsic_vslideup_vx_nxv1f64_nxv1f64( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f64_nxv1f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv1f64( %0, @@ -1947,7 +1947,7 @@ define @intrinsic_vslideup_mask_vx_nxv1f64_nxv1f64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f64_nxv1f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv1f64( %0, @@ -1962,7 +1962,7 @@ define @intrinsic_vslideup_vi_nxv1f64_nxv1f64( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f64_nxv1f64 -; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv1f64( %0, @@ -1976,7 +1976,7 @@ define @intrinsic_vslideup_mask_vi_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1f64_nxv1f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv1f64( %0, @@ -1997,7 +1997,7 @@ define @intrinsic_vslideup_vx_nxv2f64_nxv2f64( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f64_nxv2f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv2f64( %0, @@ -2018,7 +2018,7 @@ define @intrinsic_vslideup_mask_vx_nxv2f64_nxv2f64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f64_nxv2f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv2f64( %0, @@ -2033,7 +2033,7 @@ define @intrinsic_vslideup_vi_nxv2f64_nxv2f64( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f64_nxv2f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv2f64( %0, @@ -2047,7 +2047,7 @@ define @intrinsic_vslideup_mask_vi_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2f64_nxv2f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv2f64( %0, @@ -2068,7 +2068,7 @@ define @intrinsic_vslideup_vx_nxv4f64_nxv4f64( %0, %1, i64 %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f64_nxv4f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 %a = call @llvm.riscv.vslideup.nxv4f64( %0, @@ -2089,7 +2089,7 @@ define @intrinsic_vslideup_mask_vx_nxv4f64_nxv4f64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f64_nxv4f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t %a = call @llvm.riscv.vslideup.mask.nxv4f64( %0, @@ -2104,7 +2104,7 @@ define @intrinsic_vslideup_vi_nxv4f64_nxv4f64( %0, %1, i64 %2) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f64_nxv4f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vslideup.nxv4f64( %0, @@ -2118,7 +2118,7 @@ define @intrinsic_vslideup_mask_vi_nxv4f64_nxv4f64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4f64_nxv4f64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vslideup.mask.nxv4f64( %0, diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vsll_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vsll_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vsll_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vsll_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vsll_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vsll_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vsll_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu 
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vsll_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vsll_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vsll_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vsll_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vsll_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vsll_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vsll_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32 
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vsll_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv1i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vsll_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv2i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vsll_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv4i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vsll_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv8i8.i8( %0, @@ -908,7 +908,7 @@ define @intrinsic_vsll_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv16i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vsll_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv32i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vsll_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv64i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vsll_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv1i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vsll_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv2i16.i16( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vsll_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv4i16.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vsll_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv8i16.i16( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vsll_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv16i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vsll_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv32i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vsll_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv1i32.i32( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vsll_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv2i32.i32( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vsll_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv4i32.i32( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vsll_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv8i32.i32( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vsll_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv16i32.i32( %0, @@ -1456,7 +1456,7 @@ define @intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsll.mask.nxv1i8.i8( %0, @@ -1484,7 +1484,7 @@ define @intrinsic_vsll_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsll.mask.nxv2i8.i8( %0, @@ -1512,7 +1512,7 @@ define @intrinsic_vsll_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsll.mask.nxv4i8.i8( %0, @@ -1540,7 +1540,7 @@ define @intrinsic_vsll_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsll.mask.nxv8i8.i8( %0, @@ -1568,7 +1568,7 @@ define @intrinsic_vsll_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsll.mask.nxv16i8.i8( %0, @@ -1596,7 +1596,7 @@ define @intrinsic_vsll_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsll.mask.nxv32i8.i8( %0, @@ -1624,7 +1624,7 @@ define @intrinsic_vsll_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsll.mask.nxv64i8.i8( %0, @@ -1652,7 +1652,7 @@ define @intrinsic_vsll_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsll.mask.nxv1i16.i16( %0, @@ -1680,7 +1680,7 @@ define @intrinsic_vsll_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsll.mask.nxv2i16.i16( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vsll_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) 
nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i16_nxv4i16_i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
 %a = call @llvm.riscv.vsll.mask.nxv4i16.i16(
 %0,
@@ -1736,7 +1736,7 @@
 define @intrinsic_vsll_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i16_nxv8i16_i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
 %a = call @llvm.riscv.vsll.mask.nxv8i16.i16(
 %0,
@@ -1764,7 +1764,7 @@
 define @intrinsic_vsll_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i16_nxv16i16_i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
 %a = call @llvm.riscv.vsll.mask.nxv16i16.i16(
 %0,
@@ -1792,7 +1792,7 @@
 define @intrinsic_vsll_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv32i16_nxv32i16_i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
 %a = call @llvm.riscv.vsll.mask.nxv32i16.i16(
 %0,
@@ -1820,7 +1820,7 @@
 define @intrinsic_vsll_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i32_nxv1i32_i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
 %a = call @llvm.riscv.vsll.mask.nxv1i32.i32(
 %0,
@@ -1848,7 +1848,7 @@
 define @intrinsic_vsll_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i32_nxv2i32_i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
 %a = call @llvm.riscv.vsll.mask.nxv2i32.i32(
 %0,
@@ -1876,7 +1876,7 @@
 define @intrinsic_vsll_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i32_nxv4i32_i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
 %a = call @llvm.riscv.vsll.mask.nxv4i32.i32(
 %0,
@@ -1904,7 +1904,7 @@
 define @intrinsic_vsll_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i32_nxv8i32_i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
 %a = call @llvm.riscv.vsll.mask.nxv8i32.i32(
 %0,
@@ -1932,7 +1932,7 @@
 define @intrinsic_vsll_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i32_nxv16i32_i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
 %a = call @llvm.riscv.vsll.mask.nxv16i32.i32(
 %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll
@@ -28,7 +28,7 @@
 define @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0,
%1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vsll_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vsll_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vsll_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vsll_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vsll_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vsll_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vsll_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vsll_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, 
%3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vsll_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vsll_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vsll_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vsll_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vsll_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vsll_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define 
@intrinsic_vsll_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64( %0, @@ -788,7 +788,7 @@ define @intrinsic_vsll_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64( %0, @@ -828,7 +828,7 @@ define @intrinsic_vsll_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64( %0, @@ -868,7 +868,7 @@ define @intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64( %0, @@ -908,7 +908,7 @@ define @intrinsic_vsll_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv1i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vsll_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv2i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vsll_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv4i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vsll_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv8i8.i8( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vsll_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv16i8.i8( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vsll_mask_vx_nxv32i8_nxv32i8_i8( %0, 
%1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv32i8.i8( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vsll_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv64i8.i8( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vsll_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv1i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vsll_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv2i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vsll_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv4i16.i16( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vsll_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv8i16.i16( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vsll_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv16i16.i16( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vsll_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv32i16.i16( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vsll_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv1i32.i32( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vsll_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 
%4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv2i32.i32( %0, @@ -1508,7 +1508,7 @@ define @intrinsic_vsll_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv4i32.i32( %0, @@ -1548,7 +1548,7 @@ define @intrinsic_vsll_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv8i32.i32( %0, @@ -1588,7 +1588,7 @@ define @intrinsic_vsll_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv16i32.i32( %0, @@ -1628,7 +1628,7 @@ define @intrinsic_vsll_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv1i64.i64( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vsll_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv2i64.i64( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vsll_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv4i64.i64( %0, @@ -1748,7 +1748,7 @@ define @intrinsic_vsll_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsll.mask.nxv8i64.i64( %0, @@ -1776,7 +1776,7 @@ define @intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsll.mask.nxv1i8.i8( %0, @@ -1804,7 +1804,7 @@ define @intrinsic_vsll_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vsll_mask_vi_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsll.mask.nxv2i8.i8( %0, @@ -1832,7 +1832,7 @@ define @intrinsic_vsll_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsll.mask.nxv4i8.i8( %0, @@ -1860,7 +1860,7 @@ define @intrinsic_vsll_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsll.mask.nxv8i8.i8( %0, @@ -1888,7 +1888,7 @@ define @intrinsic_vsll_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsll.mask.nxv16i8.i8( %0, @@ -1916,7 +1916,7 @@ define @intrinsic_vsll_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsll.mask.nxv32i8.i8( %0, @@ -1944,7 +1944,7 @@ define @intrinsic_vsll_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsll.mask.nxv64i8.i8( %0, @@ -1972,7 +1972,7 @@ define @intrinsic_vsll_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsll.mask.nxv1i16.i16( %0, @@ -2000,7 +2000,7 @@ define @intrinsic_vsll_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsll.mask.nxv2i16.i16( %0, @@ -2028,7 +2028,7 @@ define @intrinsic_vsll_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsll.mask.nxv4i16.i16( %0, @@ -2056,7 +2056,7 @@ define @intrinsic_vsll_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call 
@llvm.riscv.vsll.mask.nxv8i16.i16( %0, @@ -2084,7 +2084,7 @@ define @intrinsic_vsll_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsll.mask.nxv16i16.i16( %0, @@ -2112,7 +2112,7 @@ define @intrinsic_vsll_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsll.mask.nxv32i16.i16( %0, @@ -2140,7 +2140,7 @@ define @intrinsic_vsll_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsll.mask.nxv1i32.i32( %0, @@ -2168,7 +2168,7 @@ define @intrinsic_vsll_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsll.mask.nxv2i32.i32( %0, @@ -2196,7 +2196,7 @@ define @intrinsic_vsll_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsll.mask.nxv4i32.i32( %0, @@ -2224,7 +2224,7 @@ define @intrinsic_vsll_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsll.mask.nxv8i32.i32( %0, @@ -2252,7 +2252,7 @@ define @intrinsic_vsll_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsll.mask.nxv16i32.i32( %0, @@ -2280,7 +2280,7 @@ define @intrinsic_vsll_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsll.mask.nxv1i64.i64( %0, @@ -2308,7 +2308,7 @@ define @intrinsic_vsll_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsll.mask.nxv2i64.i64( %0, @@ -2336,7 +2336,7 @@ define @intrinsic_vsll_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vsll_mask_vi_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsll.mask.nxv4i64.i64( %0, @@ -2364,7 +2364,7 @@ define @intrinsic_vsll_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsll.mask.nxv8i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call 
@llvm.riscv.vsmul.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e32,m2,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv1i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv2i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv4i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv8i8.i8( %0, @@ -908,7 +908,7 @@ define @intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv16i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv32i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsmul.vx 
{{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv64i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv1i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv2i16.i16( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv4i16.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv8i16.i16( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv16i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv32i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv1i32.i32( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv2i32.i32( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e32,m2,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv4i32.i32( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv8i32.i32( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv16i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define 
@intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, 
v0.t %a = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64( %0, @@ -788,7 +788,7 @@ define @intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64( %0, @@ -828,7 +828,7 @@ define @intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64( %0, @@ -868,7 +868,7 @@ define @intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64( %0, @@ -908,7 +908,7 @@ define @intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv1i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: 
vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv2i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv4i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv8i8.i8( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv16i8.i8( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv32i8.i8( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv64i8.i8( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv1i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv2i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv4i16.i16( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, 
{{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv8i16.i16( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv16i16.i16( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv32i16.i16( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv1i32.i32( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv2i32.i32( %0, @@ -1508,7 +1508,7 @@ define @intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv4i32.i32( %0, @@ -1548,7 +1548,7 @@ define @intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv8i32.i32( %0, @@ -1588,7 +1588,7 @@ define @intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv16i32.i32( %0, @@ -1628,7 +1628,7 @@ define @intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv1i64.i64( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; 
CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv2i64.i64( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv4i64.i64( %0, @@ -1748,7 +1748,7 @@ define @intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsmul.mask.nxv8i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vsra_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vsra_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vsra_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vsra_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vsra_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vsra_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { 
entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vsra_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vsra_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vsra_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vsra_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vsra_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vsra_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vsra_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define 
@intrinsic_vsra_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vsra_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vsra_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv1i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vsra_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv2i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vsra_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv4i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vsra_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv8i8.i8( %0, @@ -908,7 +908,7 @@ define @intrinsic_vsra_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv16i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vsra_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv32i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vsra_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, 
i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv64i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vsra_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv1i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vsra_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv2i16.i16( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vsra_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv4i16.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vsra_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv8i16.i16( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vsra_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv16i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vsra_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv32i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vsra_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv1i32.i32( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vsra_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv2i32.i32( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vsra_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) 
nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv4i32.i32( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vsra_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv8i32.i32( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vsra_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv16i32.i32( %0, @@ -1456,7 +1456,7 @@ define @intrinsic_vsra_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv1i8.i8( %0, @@ -1484,7 +1484,7 @@ define @intrinsic_vsra_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv2i8.i8( %0, @@ -1512,7 +1512,7 @@ define @intrinsic_vsra_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv4i8.i8( %0, @@ -1540,7 +1540,7 @@ define @intrinsic_vsra_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv8i8.i8( %0, @@ -1568,7 +1568,7 @@ define @intrinsic_vsra_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv16i8.i8( %0, @@ -1596,7 +1596,7 @@ define @intrinsic_vsra_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv32i8.i8( %0, @@ -1624,7 +1624,7 @@ define @intrinsic_vsra_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; 
CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv64i8.i8( %0, @@ -1652,7 +1652,7 @@ define @intrinsic_vsra_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv1i16.i16( %0, @@ -1680,7 +1680,7 @@ define @intrinsic_vsra_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv2i16.i16( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vsra_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv4i16.i16( %0, @@ -1736,7 +1736,7 @@ define @intrinsic_vsra_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv8i16.i16( %0, @@ -1764,7 +1764,7 @@ define @intrinsic_vsra_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv16i16.i16( %0, @@ -1792,7 +1792,7 @@ define @intrinsic_vsra_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv32i16.i16( %0, @@ -1820,7 +1820,7 @@ define @intrinsic_vsra_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv1i32.i32( %0, @@ -1848,7 +1848,7 @@ define @intrinsic_vsra_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv2i32.i32( %0, @@ -1876,7 +1876,7 @@ define @intrinsic_vsra_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv4i32.i32( %0, @@ -1904,7 +1904,7 @@ define @intrinsic_vsra_mask_vi_nxv8i32_nxv8i32_i32( 
%0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv8i32.i32( %0, @@ -1932,7 +1932,7 @@ define @intrinsic_vsra_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv16i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vsra_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vsra_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vsra_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vsra_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vsra_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vsra_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, 
v0.t %a = call @llvm.riscv.vsra.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vsra_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vsra_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vsra_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vsra_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vsra_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vsra_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vsra_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vsra_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; 
CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vsra_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vsra_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv1i64.nxv1i64( %0, @@ -788,7 +788,7 @@ define @intrinsic_vsra_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv2i64.nxv2i64( %0, @@ -828,7 +828,7 @@ define @intrinsic_vsra_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv4i64.nxv4i64( %0, @@ -868,7 +868,7 @@ define @intrinsic_vsra_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv8i64.nxv8i64( %0, @@ -908,7 +908,7 @@ define @intrinsic_vsra_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv1i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vsra_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv2i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vsra_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: 
vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv4i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vsra_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv8i8.i8( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vsra_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv16i8.i8( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vsra_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv32i8.i8( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vsra_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv64i8.i8( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vsra_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv1i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vsra_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv2i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vsra_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv4i16.i16( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vsra_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv8i16.i16( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vsra_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, 
v0.t %a = call @llvm.riscv.vsra.mask.nxv16i16.i16( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vsra_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv32i16.i16( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vsra_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv1i32.i32( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vsra_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv2i32.i32( %0, @@ -1508,7 +1508,7 @@ define @intrinsic_vsra_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv4i32.i32( %0, @@ -1548,7 +1548,7 @@ define @intrinsic_vsra_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv8i32.i32( %0, @@ -1588,7 +1588,7 @@ define @intrinsic_vsra_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv16i32.i32( %0, @@ -1628,7 +1628,7 @@ define @intrinsic_vsra_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv1i64.i64( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vsra_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv2i64.i64( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vsra_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = 
call @llvm.riscv.vsra.mask.nxv4i64.i64( %0, @@ -1748,7 +1748,7 @@ define @intrinsic_vsra_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsra.mask.nxv8i64.i64( %0, @@ -1776,7 +1776,7 @@ define @intrinsic_vsra_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv1i8.i8( %0, @@ -1804,7 +1804,7 @@ define @intrinsic_vsra_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv2i8.i8( %0, @@ -1832,7 +1832,7 @@ define @intrinsic_vsra_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv4i8.i8( %0, @@ -1860,7 +1860,7 @@ define @intrinsic_vsra_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv8i8.i8( %0, @@ -1888,7 +1888,7 @@ define @intrinsic_vsra_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv16i8.i8( %0, @@ -1916,7 +1916,7 @@ define @intrinsic_vsra_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv32i8.i8( %0, @@ -1944,7 +1944,7 @@ define @intrinsic_vsra_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv64i8.i8( %0, @@ -1972,7 +1972,7 @@ define @intrinsic_vsra_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv1i16.i16( %0, @@ -2000,7 +2000,7 @@ define @intrinsic_vsra_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i16_nxv2i16_i16 -; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv2i16.i16( %0, @@ -2028,7 +2028,7 @@ define @intrinsic_vsra_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv4i16.i16( %0, @@ -2056,7 +2056,7 @@ define @intrinsic_vsra_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv8i16.i16( %0, @@ -2084,7 +2084,7 @@ define @intrinsic_vsra_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv16i16.i16( %0, @@ -2112,7 +2112,7 @@ define @intrinsic_vsra_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv32i16.i16( %0, @@ -2140,7 +2140,7 @@ define @intrinsic_vsra_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv1i32.i32( %0, @@ -2168,7 +2168,7 @@ define @intrinsic_vsra_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv2i32.i32( %0, @@ -2196,7 +2196,7 @@ define @intrinsic_vsra_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv4i32.i32( %0, @@ -2224,7 +2224,7 @@ define @intrinsic_vsra_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv8i32.i32( %0, @@ -2252,7 +2252,7 @@ define @intrinsic_vsra_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call 
@llvm.riscv.vsra.mask.nxv16i32.i32( %0, @@ -2280,7 +2280,7 @@ define @intrinsic_vsra_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv1i64.i64( %0, @@ -2308,7 +2308,7 @@ define @intrinsic_vsra_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv2i64.i64( %0, @@ -2336,7 +2336,7 @@ define @intrinsic_vsra_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv4i64.i64( %0, @@ -2364,7 +2364,7 @@ define @intrinsic_vsra_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsra.mask.nxv8i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vsrl_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vsrl_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vsrl_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vsrl_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vsrl_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e8,m2,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vsrl_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vsrl_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vsrl_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vsrl_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vsrl_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vsrl_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vsrl_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vsrl_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vsrl_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vsrl_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv1i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv2i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv4i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv8i8.i8( %0, @@ -908,7 +908,7 @@ define @intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv16i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv32i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv64i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv1i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv2i16.i16( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv4i16.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv8i16.i16( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv16i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv32i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; 
CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv1i32.i32( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv2i32.i32( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv4i32.i32( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv8i32.i32( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv16i32.i32( %0, @@ -1456,7 +1456,7 @@ define @intrinsic_vsrl_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv1i8.i8( %0, @@ -1484,7 +1484,7 @@ define @intrinsic_vsrl_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv2i8.i8( %0, @@ -1512,7 +1512,7 @@ define @intrinsic_vsrl_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv4i8.i8( %0, @@ -1540,7 +1540,7 @@ define @intrinsic_vsrl_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv8i8.i8( %0, @@ -1568,7 +1568,7 @@ define @intrinsic_vsrl_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv16i8.i8( %0, @@ -1596,7 +1596,7 @@ define 
@intrinsic_vsrl_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv32i8.i8( %0, @@ -1624,7 +1624,7 @@ define @intrinsic_vsrl_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv64i8.i8( %0, @@ -1652,7 +1652,7 @@ define @intrinsic_vsrl_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv1i16.i16( %0, @@ -1680,7 +1680,7 @@ define @intrinsic_vsrl_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv2i16.i16( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vsrl_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv4i16.i16( %0, @@ -1736,7 +1736,7 @@ define @intrinsic_vsrl_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv8i16.i16( %0, @@ -1764,7 +1764,7 @@ define @intrinsic_vsrl_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv16i16.i16( %0, @@ -1792,7 +1792,7 @@ define @intrinsic_vsrl_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv32i16.i16( %0, @@ -1820,7 +1820,7 @@ define @intrinsic_vsrl_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv1i32.i32( %0, @@ -1848,7 +1848,7 @@ define @intrinsic_vsrl_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv2i32.i32( %0, @@ -1876,7 +1876,7 @@ define @intrinsic_vsrl_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv4i32.i32( %0, @@ -1904,7 +1904,7 @@ define @intrinsic_vsrl_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv8i32.i32( %0, @@ -1932,7 +1932,7 @@ define @intrinsic_vsrl_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv16i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vsrl_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vsrl_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vsrl_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vsrl_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vsrl_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vsrl_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; 
CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vsrl_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vsrl_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vsrl_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vsrl_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vsrl_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vsrl_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define 
@intrinsic_vsrl_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vsrl_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vsrl_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vsrl_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64( %0, @@ -788,7 +788,7 @@ define @intrinsic_vsrl_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64( %0, @@ -828,7 +828,7 @@ define @intrinsic_vsrl_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64( %0, @@ -868,7 +868,7 @@ define @intrinsic_vsrl_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64( %0, @@ -908,7 +908,7 @@ define @intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv1i8.i8( %0, @@ 
-948,7 +948,7 @@ define @intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv2i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv4i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv8i8.i8( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv16i8.i8( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv32i8.i8( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv64i8.i8( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv1i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv2i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv4i16.i16( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16_i16( 
%0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv8i16.i16( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv16i16.i16( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv32i16.i16( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv1i32.i32( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv2i32.i32( %0, @@ -1508,7 +1508,7 @@ define @intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv4i32.i32( %0, @@ -1548,7 +1548,7 @@ define @intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv8i32.i32( %0, @@ -1588,7 +1588,7 @@ define @intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv16i32.i32( %0, @@ -1628,7 +1628,7 @@ define @intrinsic_vsrl_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv1i64.i64( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vsrl_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, 
i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv2i64.i64( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vsrl_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv4i64.i64( %0, @@ -1748,7 +1748,7 @@ define @intrinsic_vsrl_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsrl.mask.nxv8i64.i64( %0, @@ -1776,7 +1776,7 @@ define @intrinsic_vsrl_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv1i8.i8( %0, @@ -1804,7 +1804,7 @@ define @intrinsic_vsrl_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv2i8.i8( %0, @@ -1832,7 +1832,7 @@ define @intrinsic_vsrl_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv4i8.i8( %0, @@ -1860,7 +1860,7 @@ define @intrinsic_vsrl_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv8i8.i8( %0, @@ -1888,7 +1888,7 @@ define @intrinsic_vsrl_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv16i8.i8( %0, @@ -1916,7 +1916,7 @@ define @intrinsic_vsrl_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv32i8.i8( %0, @@ -1944,7 +1944,7 @@ define @intrinsic_vsrl_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e8,m8,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv64i8.i8( %0, @@ -1972,7 +1972,7 @@ define @intrinsic_vsrl_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv1i16.i16( %0, @@ -2000,7 +2000,7 @@ define @intrinsic_vsrl_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv2i16.i16( %0, @@ -2028,7 +2028,7 @@ define @intrinsic_vsrl_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv4i16.i16( %0, @@ -2056,7 +2056,7 @@ define @intrinsic_vsrl_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv8i16.i16( %0, @@ -2084,7 +2084,7 @@ define @intrinsic_vsrl_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv16i16.i16( %0, @@ -2112,7 +2112,7 @@ define @intrinsic_vsrl_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv32i16.i16( %0, @@ -2140,7 +2140,7 @@ define @intrinsic_vsrl_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv1i32.i32( %0, @@ -2168,7 +2168,7 @@ define @intrinsic_vsrl_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv2i32.i32( %0, @@ -2196,7 +2196,7 @@ define @intrinsic_vsrl_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv4i32.i32( %0, @@ -2224,7 +2224,7 @@ define 
@intrinsic_vsrl_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv8i32.i32( %0, @@ -2252,7 +2252,7 @@ define @intrinsic_vsrl_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv16i32.i32( %0, @@ -2280,7 +2280,7 @@ define @intrinsic_vsrl_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv1i64.i64( %0, @@ -2308,7 +2308,7 @@ define @intrinsic_vsrl_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv2i64.i64( %0, @@ -2336,7 +2336,7 @@ define @intrinsic_vsrl_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv4i64.i64( %0, @@ -2364,7 +2364,7 @@ define @intrinsic_vsrl_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vsrl.mask.nxv8i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vssra_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vssra_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vssra_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call 
@llvm.riscv.vssra.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vssra_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vssra_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vssra_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vssra_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vssra_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vssra_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vssra_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vssra_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: 
vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vssra_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vssra_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vssra_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vssra_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vssra_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv1i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vssra_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv2i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vssra_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv4i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vssra_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv8i8.i8( %0, @@ -908,7 +908,7 @@ define @intrinsic_vssra_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv16i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vssra_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv32i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vssra_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv64i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vssra_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv1i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vssra_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv2i16.i16( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vssra_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv4i16.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vssra_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv8i16.i16( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vssra_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e16,m4,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv16i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vssra_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv32i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vssra_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv1i32.i32( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vssra_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv2i32.i32( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vssra_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv4i32.i32( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vssra_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv8i32.i32( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vssra_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv16i32.i32( %0, @@ -1456,7 +1456,7 @@ define @intrinsic_vssra_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv1i8.i8( %0, @@ -1484,7 +1484,7 @@ define @intrinsic_vssra_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv2i8.i8( %0, @@ -1512,7 +1512,7 @@ define @intrinsic_vssra_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, 
{{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv4i8.i8( %0, @@ -1540,7 +1540,7 @@ define @intrinsic_vssra_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv8i8.i8( %0, @@ -1568,7 +1568,7 @@ define @intrinsic_vssra_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv16i8.i8( %0, @@ -1596,7 +1596,7 @@ define @intrinsic_vssra_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv32i8.i8( %0, @@ -1624,7 +1624,7 @@ define @intrinsic_vssra_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv64i8.i8( %0, @@ -1652,7 +1652,7 @@ define @intrinsic_vssra_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv1i16.i16( %0, @@ -1680,7 +1680,7 @@ define @intrinsic_vssra_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv2i16.i16( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vssra_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv4i16.i16( %0, @@ -1736,7 +1736,7 @@ define @intrinsic_vssra_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv8i16.i16( %0, @@ -1764,7 +1764,7 @@ define @intrinsic_vssra_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv16i16.i16( %0, @@ -1792,7 +1792,7 @@ define @intrinsic_vssra_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) 
nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv32i16.i16( %0, @@ -1820,7 +1820,7 @@ define @intrinsic_vssra_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv1i32.i32( %0, @@ -1848,7 +1848,7 @@ define @intrinsic_vssra_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv2i32.i32( %0, @@ -1876,7 +1876,7 @@ define @intrinsic_vssra_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv4i32.i32( %0, @@ -1904,7 +1904,7 @@ define @intrinsic_vssra_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv8i32.i32( %0, @@ -1932,7 +1932,7 @@ define @intrinsic_vssra_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv16i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vssra_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vssra_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vssra_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8( 
%0, @@ -148,7 +148,7 @@ define @intrinsic_vssra_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vssra_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vssra_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vssra_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vssra_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vssra_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vssra_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vssra_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, 
{{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vssra_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vssra_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vssra_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vssra_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vssra_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64( %0, @@ -788,7 +788,7 @@ define @intrinsic_vssra_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64( %0, @@ -828,7 +828,7 @@ define @intrinsic_vssra_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64( %0, @@ -868,7 +868,7 @@ define @intrinsic_vssra_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64( %0, @@ -908,7 +908,7 @@ define @intrinsic_vssra_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv1i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vssra_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv2i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vssra_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv4i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vssra_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv8i8.i8( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vssra_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv16i8.i8( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vssra_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv32i8.i8( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vssra_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv64i8.i8( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vssra_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e16,mf4,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv1i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vssra_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv2i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vssra_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv4i16.i16( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vssra_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv8i16.i16( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vssra_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv16i16.i16( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vssra_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv32i16.i16( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vssra_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv1i32.i32( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vssra_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv2i32.i32( %0, @@ -1508,7 +1508,7 @@ define @intrinsic_vssra_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv4i32.i32( %0, @@ -1548,7 +1548,7 @@ define @intrinsic_vssra_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv8i32.i32( %0, @@ -1588,7 +1588,7 @@ define @intrinsic_vssra_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv16i32.i32( %0, @@ -1628,7 +1628,7 @@ define @intrinsic_vssra_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv1i64.i64( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vssra_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv2i64.i64( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vssra_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv4i64.i64( %0, @@ -1748,7 +1748,7 @@ define @intrinsic_vssra_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssra.mask.nxv8i64.i64( %0, @@ -1776,7 +1776,7 @@ define @intrinsic_vssra_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv1i8.i8( %0, @@ -1804,7 +1804,7 @@ define @intrinsic_vssra_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv2i8.i8( %0, @@ -1832,7 +1832,7 @@ define @intrinsic_vssra_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv4i8.i8( %0, @@ -1860,7 +1860,7 @@ define @intrinsic_vssra_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, 
{{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv8i8.i8( %0, @@ -1888,7 +1888,7 @@ define @intrinsic_vssra_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv16i8.i8( %0, @@ -1916,7 +1916,7 @@ define @intrinsic_vssra_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv32i8.i8( %0, @@ -1944,7 +1944,7 @@ define @intrinsic_vssra_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv64i8.i8( %0, @@ -1972,7 +1972,7 @@ define @intrinsic_vssra_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv1i16.i16( %0, @@ -2000,7 +2000,7 @@ define @intrinsic_vssra_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv2i16.i16( %0, @@ -2028,7 +2028,7 @@ define @intrinsic_vssra_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv4i16.i16( %0, @@ -2056,7 +2056,7 @@ define @intrinsic_vssra_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv8i16.i16( %0, @@ -2084,7 +2084,7 @@ define @intrinsic_vssra_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv16i16.i16( %0, @@ -2112,7 +2112,7 @@ define @intrinsic_vssra_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv32i16.i16( %0, @@ -2140,7 +2140,7 @@ define @intrinsic_vssra_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, 
%2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv1i32.i32( %0, @@ -2168,7 +2168,7 @@ define @intrinsic_vssra_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv2i32.i32( %0, @@ -2196,7 +2196,7 @@ define @intrinsic_vssra_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv4i32.i32( %0, @@ -2224,7 +2224,7 @@ define @intrinsic_vssra_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv8i32.i32( %0, @@ -2252,7 +2252,7 @@ define @intrinsic_vssra_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv16i32.i32( %0, @@ -2280,7 +2280,7 @@ define @intrinsic_vssra_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv1i64.i64( %0, @@ -2308,7 +2308,7 @@ define @intrinsic_vssra_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv2i64.i64( %0, @@ -2336,7 +2336,7 @@ define @intrinsic_vssra_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv4i64.i64( %0, @@ -2364,7 +2364,7 @@ define @intrinsic_vssra_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssra.mask.nxv8i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll @@ -28,7 +28,7 @@ define 
@intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vssrl_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vssrl_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vssrl_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vssrl_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vssrl_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vssrl_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vssrl_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16( %0, 
@@ -388,7 +388,7 @@ define @intrinsic_vssrl_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vssrl_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vssrl_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vssrl_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vssrl_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vssrl_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vssrl_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vssrl.vv 
{{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv1i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv2i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv4i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv8i8.i8( %0, @@ -908,7 +908,7 @@ define @intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv16i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv32i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv64i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv1i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = 
call @llvm.riscv.vssrl.mask.nxv2i16.i16( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv4i16.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv8i16.i16( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv16i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv32i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv1i32.i32( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv2i32.i32( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv4i32.i32( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv8i32.i32( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, 
{{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv16i32.i32( %0, @@ -1456,7 +1456,7 @@ define @intrinsic_vssrl_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv1i8.i8( %0, @@ -1484,7 +1484,7 @@ define @intrinsic_vssrl_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv2i8.i8( %0, @@ -1512,7 +1512,7 @@ define @intrinsic_vssrl_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv4i8.i8( %0, @@ -1540,7 +1540,7 @@ define @intrinsic_vssrl_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv8i8.i8( %0, @@ -1568,7 +1568,7 @@ define @intrinsic_vssrl_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv16i8.i8( %0, @@ -1596,7 +1596,7 @@ define @intrinsic_vssrl_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv32i8.i8( %0, @@ -1624,7 +1624,7 @@ define @intrinsic_vssrl_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv64i8.i8( %0, @@ -1652,7 +1652,7 @@ define @intrinsic_vssrl_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv1i16.i16( %0, @@ -1680,7 +1680,7 @@ define @intrinsic_vssrl_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv2i16.i16( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vssrl_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; 
CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv4i16.i16( %0, @@ -1736,7 +1736,7 @@ define @intrinsic_vssrl_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv8i16.i16( %0, @@ -1764,7 +1764,7 @@ define @intrinsic_vssrl_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv16i16.i16( %0, @@ -1792,7 +1792,7 @@ define @intrinsic_vssrl_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv32i16.i16( %0, @@ -1820,7 +1820,7 @@ define @intrinsic_vssrl_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv1i32.i32( %0, @@ -1848,7 +1848,7 @@ define @intrinsic_vssrl_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv2i32.i32( %0, @@ -1876,7 +1876,7 @@ define @intrinsic_vssrl_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv4i32.i32( %0, @@ -1904,7 +1904,7 @@ define @intrinsic_vssrl_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv8i32.i32( %0, @@ -1932,7 +1932,7 @@ define @intrinsic_vssrl_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv16i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll @@ -28,7 +28,7 @@ define 
@intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vssrl_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vssrl_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vssrl_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vssrl_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vssrl_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vssrl_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vssrl_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16( %0, 
@@ -388,7 +388,7 @@ define @intrinsic_vssrl_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vssrl_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vssrl_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vssrl_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vssrl_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vssrl_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vssrl_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vssrl.vv 
{{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vssrl_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64( %0, @@ -788,7 +788,7 @@ define @intrinsic_vssrl_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64( %0, @@ -828,7 +828,7 @@ define @intrinsic_vssrl_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64( %0, @@ -868,7 +868,7 @@ define @intrinsic_vssrl_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64( %0, @@ -908,7 +908,7 @@ define @intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv1i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv2i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv4i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv8i8.i8( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vssrl.vx 
{{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv16i8.i8( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv32i8.i8( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv64i8.i8( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv1i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv2i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv4i16.i16( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv8i16.i16( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv16i16.i16( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv32i16.i16( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; 
CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv1i32.i32( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv2i32.i32( %0, @@ -1508,7 +1508,7 @@ define @intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv4i32.i32( %0, @@ -1548,7 +1548,7 @@ define @intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv8i32.i32( %0, @@ -1588,7 +1588,7 @@ define @intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv16i32.i32( %0, @@ -1628,7 +1628,7 @@ define @intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv1i64.i64( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv2i64.i64( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv4i64.i64( %0, @@ -1748,7 +1748,7 @@ define @intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssrl.mask.nxv8i64.i64( %0, @@ -1776,7 +1776,7 @@ define @intrinsic_vssrl_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; 
CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv1i8.i8( %0, @@ -1804,7 +1804,7 @@ define @intrinsic_vssrl_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv2i8.i8( %0, @@ -1832,7 +1832,7 @@ define @intrinsic_vssrl_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv4i8.i8( %0, @@ -1860,7 +1860,7 @@ define @intrinsic_vssrl_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv8i8.i8( %0, @@ -1888,7 +1888,7 @@ define @intrinsic_vssrl_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv16i8.i8( %0, @@ -1916,7 +1916,7 @@ define @intrinsic_vssrl_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv32i8.i8( %0, @@ -1944,7 +1944,7 @@ define @intrinsic_vssrl_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv64i8.i8( %0, @@ -1972,7 +1972,7 @@ define @intrinsic_vssrl_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv1i16.i16( %0, @@ -2000,7 +2000,7 @@ define @intrinsic_vssrl_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv2i16.i16( %0, @@ -2028,7 +2028,7 @@ define @intrinsic_vssrl_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv4i16.i16( %0, @@ -2056,7 +2056,7 @@ define @intrinsic_vssrl_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 
%3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv8i16.i16( %0, @@ -2084,7 +2084,7 @@ define @intrinsic_vssrl_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv16i16.i16( %0, @@ -2112,7 +2112,7 @@ define @intrinsic_vssrl_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv32i16.i16( %0, @@ -2140,7 +2140,7 @@ define @intrinsic_vssrl_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv1i32.i32( %0, @@ -2168,7 +2168,7 @@ define @intrinsic_vssrl_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv2i32.i32( %0, @@ -2196,7 +2196,7 @@ define @intrinsic_vssrl_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv4i32.i32( %0, @@ -2224,7 +2224,7 @@ define @intrinsic_vssrl_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv8i32.i32( %0, @@ -2252,7 +2252,7 @@ define @intrinsic_vssrl_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv16i32.i32( %0, @@ -2280,7 +2280,7 @@ define @intrinsic_vssrl_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv1i64.i64( %0, @@ -2308,7 +2308,7 @@ define @intrinsic_vssrl_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv2i64.i64( %0, @@ -2336,7 +2336,7 @@ define @intrinsic_vssrl_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv4i64.i64( %0, @@ -2364,7 +2364,7 @@ define @intrinsic_vssrl_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vssrl.mask.nxv8i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vssub_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vssub_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vssub_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vssub_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vssub_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vssub_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, 
%1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vssub_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vssub_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vssub_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vssub_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vssub_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vssub_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vssub_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32( 
%0, @@ -628,7 +628,7 @@ define @intrinsic_vssub_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vssub_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vssub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv1i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vssub_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv2i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vssub_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv4i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vssub_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv8i8.i8( %0, @@ -908,7 +908,7 @@ define @intrinsic_vssub_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv16i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vssub_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv32i8.i8( %0, @@ -988,7 +988,7 @@ 
define @intrinsic_vssub_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv64i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vssub_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv1i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vssub_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv2i16.i16( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vssub_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv4i16.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vssub_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv8i16.i16( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vssub_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv16i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vssub_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv32i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vssub_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv1i32.i32( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vssub_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv2i32.i32( %0, @@ 
-1348,7 +1348,7 @@ define @intrinsic_vssub_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv4i32.i32( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vssub_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv8i32.i32( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vssub_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv16i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vssub_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vssub_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vssub_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vssub_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vssub_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vssub_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vssub_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vssub_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vssub_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vssub_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vssub_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vssub_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vssub_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: 
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vssub_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vssub_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vssub_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64( %0, @@ -788,7 +788,7 @@ define @intrinsic_vssub_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64( %0, @@ -828,7 +828,7 @@ define @intrinsic_vssub_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64( %0, @@ -868,7 +868,7 @@ define @intrinsic_vssub_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64( %0, @@ -908,7 +908,7 @@ define @intrinsic_vssub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv1i8.i8( %0, @@ -948,7 +948,7 @@ define 
@intrinsic_vssub_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv2i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vssub_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv4i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vssub_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv8i8.i8( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vssub_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv16i8.i8( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vssub_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv32i8.i8( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vssub_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv64i8.i8( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vssub_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv1i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vssub_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv2i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vssub_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv4i16.i16( %0, @@ -1308,7 +1308,7 @@ define 
@intrinsic_vssub_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv8i16.i16( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vssub_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv16i16.i16( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vssub_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv32i16.i16( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vssub_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv1i32.i32( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vssub_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv2i32.i32( %0, @@ -1508,7 +1508,7 @@ define @intrinsic_vssub_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv4i32.i32( %0, @@ -1548,7 +1548,7 @@ define @intrinsic_vssub_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv8i32.i32( %0, @@ -1588,7 +1588,7 @@ define @intrinsic_vssub_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv16i32.i32( %0, @@ -1628,7 +1628,7 @@ define @intrinsic_vssub_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv1i64.i64( %0, @@ 
-1668,7 +1668,7 @@ define @intrinsic_vssub_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv2i64.i64( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vssub_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv4i64.i64( %0, @@ -1748,7 +1748,7 @@ define @intrinsic_vssub_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssub.mask.nxv8i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vssubu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vssubu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vssubu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vssubu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vssubu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vssubu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vssubu_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vssubu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vssubu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vssubu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vssubu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vssubu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vssubu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 
@@ define @intrinsic_vssubu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vssubu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vssubu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vssubu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv1i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vssubu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv2i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vssubu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv4i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vssubu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv8i8.i8( %0, @@ -908,7 +908,7 @@ define @intrinsic_vssubu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call 
@llvm.riscv.vssubu.mask.nxv16i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vssubu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv32i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vssubu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv64i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vssubu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv1i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vssubu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv2i16.i16( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vssubu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv4i16.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vssubu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv8i16.i16( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vssubu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv16i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vssubu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv32i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vssubu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vssubu.vx 
{{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv1i32.i32( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vssubu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv2i32.i32( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vssubu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv4i32.i32( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vssubu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv8i32.i32( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vssubu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv16i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vssubu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vssubu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vssubu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vssubu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define 
@intrinsic_vssubu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vssubu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vssubu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vssubu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vssubu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vssubu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vssubu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, 
{{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vssubu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vssubu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vssubu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vssubu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vssubu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64( %0, @@ -788,7 +788,7 @@ define @intrinsic_vssubu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64( %0, @@ -828,7 +828,7 @@ define @intrinsic_vssubu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64( %0, @@ -868,7 +868,7 @@ define @intrinsic_vssubu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64( %0, @@ -908,7 +908,7 @@ define @intrinsic_vssubu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv1i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vssubu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv2i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vssubu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv4i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vssubu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv8i8.i8( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vssubu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv16i8.i8( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vssubu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv32i8.i8( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vssubu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv64i8.i8( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vssubu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv1i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vssubu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv2i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vssubu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv4i16.i16( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vssubu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv8i16.i16( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vssubu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv16i16.i16( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vssubu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv32i16.i16( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vssubu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv1i32.i32( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vssubu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv2i32.i32( %0, @@ -1508,7 +1508,7 @@ define @intrinsic_vssubu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv4i32.i32( %0, @@ -1548,7 +1548,7 @@ define @intrinsic_vssubu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv8i32.i32( %0, @@ -1588,7 +1588,7 @@ define @intrinsic_vssubu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vssubu_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv16i32.i32( %0, @@ -1628,7 +1628,7 @@ define @intrinsic_vssubu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv1i64.i64( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vssubu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv2i64.i64( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vssubu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv4i64.i64( %0, @@ -1748,7 +1748,7 @@ define @intrinsic_vssubu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vssubu.mask.nxv8i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vsub_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vsub_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vsub_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vsub_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, 
v0.t %a = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vsub_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vsub_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vsub_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vsub_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vsub_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vsub_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vsub_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsub.vv 
{{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vsub_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vsub_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vsub_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vsub_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vsub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv1i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vsub_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv2i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vsub_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv4i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vsub_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, 
{{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv8i8.i8( %0, @@ -908,7 +908,7 @@ define @intrinsic_vsub_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv16i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vsub_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv32i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vsub_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv64i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vsub_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv1i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vsub_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv2i16.i16( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vsub_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv4i16.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vsub_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv8i16.i16( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vsub_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv16i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vsub_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = 
call @llvm.riscv.vsub.mask.nxv32i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vsub_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv1i32.i32( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vsub_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv2i32.i32( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vsub_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv4i32.i32( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vsub_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv8i32.i32( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vsub_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv16i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vsub_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vsub_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vsub_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vsub_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vsub_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vsub_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vsub_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vsub_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vsub_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vsub_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vsub_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vsub_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vsub_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vsub_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vsub_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vsub_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64( %0, @@ -788,7 +788,7 @@ define @intrinsic_vsub_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64( %0, @@ -828,7 +828,7 @@ define @intrinsic_vsub_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64( %0, @@ -868,7 +868,7 @@ define @intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, 
%3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64( %0, @@ -908,7 +908,7 @@ define @intrinsic_vsub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv1i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vsub_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv2i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vsub_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv4i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vsub_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv8i8.i8( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vsub_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv16i8.i8( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vsub_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv32i8.i8( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vsub_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv64i8.i8( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vsub_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv1i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vsub_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vsub_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv2i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vsub_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv4i16.i16( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vsub_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv8i16.i16( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vsub_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv16i16.i16( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vsub_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv32i16.i16( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vsub_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv1i32.i32( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vsub_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv2i32.i32( %0, @@ -1508,7 +1508,7 @@ define @intrinsic_vsub_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv4i32.i32( %0, @@ -1548,7 +1548,7 @@ define @intrinsic_vsub_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv8i32.i32( %0, @@ -1588,7 +1588,7 @@ define @intrinsic_vsub_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vsub_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv16i32.i32( %0, @@ -1628,7 +1628,7 @@ define @intrinsic_vsub_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv1i64.i64( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vsub_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv2i64.i64( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vsub_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv4i64.i64( %0, @@ -1748,7 +1748,7 @@ define @intrinsic_vsub_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vsub.mask.nxv8i64.i64( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vwadd_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv2i16_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vwadd_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv4i16_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vwadd_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv8i16_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, 
{{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vwadd_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv16i16_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vwadd_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv32i16_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vwadd_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i32_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vwadd_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv2i32_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vwadd_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv4i32_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vwadd_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv8i32_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vwadd_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv16i32_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vwadd_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv1i16_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8( %0, @@ -508,7 +508,7 @@ define @intrinsic_vwadd_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv2i16_nxv2i8_i8 -; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8( %0, @@ -548,7 +548,7 @@ define @intrinsic_vwadd_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv4i16_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8( %0, @@ -588,7 +588,7 @@ define @intrinsic_vwadd_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv8i16_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8( %0, @@ -628,7 +628,7 @@ define @intrinsic_vwadd_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv16i16_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8( %0, @@ -668,7 +668,7 @@ define @intrinsic_vwadd_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv32i16_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8( %0, @@ -708,7 +708,7 @@ define @intrinsic_vwadd_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv1i32_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16( %0, @@ -748,7 +748,7 @@ define @intrinsic_vwadd_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv2i32_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16( %0, @@ -788,7 +788,7 @@ define @intrinsic_vwadd_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv4i32_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16( %0, @@ -828,7 +828,7 @@ define @intrinsic_vwadd_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv8i32_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16( %0, @@ -868,7 +868,7 @@ define @intrinsic_vwadd_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vwadd_mask_vx_nxv16i32_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vwadd_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv2i16_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vwadd_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv4i16_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vwadd_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv8i16_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vwadd_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv16i16_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vwadd_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv32i16_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vwadd_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i32_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vwadd_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv2i32_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vwadd_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv4i32_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vwadd_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv8i32_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vwadd_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv16i32_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vwadd_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i64_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32( %0, @@ -508,7 +508,7 @@ define @intrinsic_vwadd_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv2i64_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32( %0, @@ -548,7 +548,7 @@ define @intrinsic_vwadd_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv4i64_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vwadd_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv8i64_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vwadd_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv1i16_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8( %0, @@ -668,7 +668,7 @@ define @intrinsic_vwadd_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) 
nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv2i16_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8( %0, @@ -708,7 +708,7 @@ define @intrinsic_vwadd_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv4i16_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8( %0, @@ -748,7 +748,7 @@ define @intrinsic_vwadd_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv8i16_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vwadd_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv16i16_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vwadd_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv32i16_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vwadd_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv1i32_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16( %0, @@ -908,7 +908,7 @@ define @intrinsic_vwadd_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv2i32_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16( %0, @@ -948,7 +948,7 @@ define @intrinsic_vwadd_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv4i32_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16( %0, @@ -988,7 +988,7 @@ define @intrinsic_vwadd_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv8i32_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16( %0, @@ -1028,7 +1028,7 @@ define 
@intrinsic_vwadd_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv16i32_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vwadd_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv1i64_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vwadd_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv2i64_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vwadd_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv4i64_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vwadd_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv8i64_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vwadd.w_mask_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i16_nxv1i16_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vwadd.w_mask_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i16_nxv2i16_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vwadd.w_mask_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i16_nxv4i16_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vwadd.w_mask_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vwadd.w_mask_wv_nxv8i16_nxv8i16_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vwadd.w_mask_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i16_nxv16i16_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vwadd.w_mask_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i32_nxv1i32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vwadd.w_mask_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i32_nxv2i32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vwadd.w_mask_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i32_nxv4i32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vwadd.w_mask_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i32_nxv8i32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vwadd.w_mask_wx_nxv1i16_nxv1i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i16_nxv1i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv1i16.i8( %0, @@ -508,7 +508,7 
@@ define @intrinsic_vwadd.w_mask_wx_nxv2i16_nxv2i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i16_nxv2i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv2i16.i8( %0, @@ -548,7 +548,7 @@ define @intrinsic_vwadd.w_mask_wx_nxv4i16_nxv4i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i16_nxv4i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv4i16.i8( %0, @@ -588,7 +588,7 @@ define @intrinsic_vwadd.w_mask_wx_nxv8i16_nxv8i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i16_nxv8i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv8i16.i8( %0, @@ -628,7 +628,7 @@ define @intrinsic_vwadd.w_mask_wx_nxv16i16_nxv16i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv16i16_nxv16i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv16i16.i8( %0, @@ -668,7 +668,7 @@ define @intrinsic_vwadd.w_mask_wx_nxv32i16_nxv32i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv32i16_nxv32i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv32i16.i8( %0, @@ -708,7 +708,7 @@ define @intrinsic_vwadd.w_mask_wx_nxv1i32_nxv1i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i32_nxv1i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv1i32.i16( %0, @@ -748,7 +748,7 @@ define @intrinsic_vwadd.w_mask_wx_nxv2i32_nxv2i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i32_nxv2i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv2i32.i16( %0, @@ -788,7 +788,7 @@ define @intrinsic_vwadd.w_mask_wx_nxv4i32_nxv4i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i32_nxv4i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv4i32.i16( %0, @@ -828,7 +828,7 @@ define @intrinsic_vwadd.w_mask_wx_nxv8i32_nxv8i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i32_nxv8i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call 
@llvm.riscv.vwadd.w.mask.nxv8i32.i16( %0, @@ -868,7 +868,7 @@ define @intrinsic_vwadd.w_mask_wx_nxv16i32_nxv16i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv16i32_nxv16i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv16i32.i16( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vwadd.w_mask_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i16_nxv1i16_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vwadd.w_mask_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i16_nxv2i16_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vwadd.w_mask_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i16_nxv4i16_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vwadd.w_mask_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i16_nxv8i16_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vwadd.w_mask_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i16_nxv16i16_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vwadd.w_mask_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i32_nxv1i32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16( %0, @@ -308,7 +308,7 @@ define 
@intrinsic_vwadd.w_mask_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i32_nxv2i32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vwadd.w_mask_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i32_nxv4i32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vwadd.w_mask_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i32_nxv8i32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vwadd.w_mask_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i64_nxv1i64_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32( %0, @@ -508,7 +508,7 @@ define @intrinsic_vwadd.w_mask_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i64_nxv2i64_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32( %0, @@ -548,7 +548,7 @@ define @intrinsic_vwadd.w_mask_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i64_nxv4i64_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vwadd.w_mask_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i64_nxv8i64_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vwadd.w_mask_wx_nxv1i16_nxv1i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i16_nxv1i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwadd.wx 
{{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv1i16.i8( %0, @@ -668,7 +668,7 @@ define @intrinsic_vwadd.w_mask_wx_nxv2i16_nxv2i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i16_nxv2i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv2i16.i8( %0, @@ -708,7 +708,7 @@ define @intrinsic_vwadd.w_mask_wx_nxv4i16_nxv4i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i16_nxv4i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv4i16.i8( %0, @@ -748,7 +748,7 @@ define @intrinsic_vwadd.w_mask_wx_nxv8i16_nxv8i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i16_nxv8i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv8i16.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vwadd.w_mask_wx_nxv16i16_nxv16i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv16i16_nxv16i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv16i16.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vwadd.w_mask_wx_nxv32i16_nxv32i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv32i16_nxv32i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv32i16.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vwadd.w_mask_wx_nxv1i32_nxv1i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i32_nxv1i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv1i32.i16( %0, @@ -908,7 +908,7 @@ define @intrinsic_vwadd.w_mask_wx_nxv2i32_nxv2i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i32_nxv2i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv2i32.i16( %0, @@ -948,7 +948,7 @@ define @intrinsic_vwadd.w_mask_wx_nxv4i32_nxv4i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i32_nxv4i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv4i32.i16( %0, @@ -988,7 +988,7 @@ define @intrinsic_vwadd.w_mask_wx_nxv8i32_nxv8i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i32_nxv8i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv8i32.i16( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vwadd.w_mask_wx_nxv16i32_nxv16i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv16i32_nxv16i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv16i32.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vwadd.w_mask_wx_nxv1i64_nxv1i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i64_nxv1i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv1i64.i32( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vwadd.w_mask_wx_nxv2i64_nxv2i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i64_nxv2i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv2i64.i32( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vwadd.w_mask_wx_nxv4i64_nxv4i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i64_nxv4i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv4i64.i32( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vwadd.w_mask_wx_nxv8i64_nxv8i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i64_nxv8i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwadd.w.mask.nxv8i64.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vwaddu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv1i16_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vwaddu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv2i16_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vwaddu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv4i16_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call 
@llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vwaddu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv8i16_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vwaddu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv16i16_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vwaddu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv32i16_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vwaddu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv1i32_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vwaddu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv2i32_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vwaddu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv4i32_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vwaddu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv8i32_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vwaddu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv16i32_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vwaddu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv1i16_nxv1i8_i8 -; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8( %0, @@ -508,7 +508,7 @@ define @intrinsic_vwaddu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv2i16_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8( %0, @@ -548,7 +548,7 @@ define @intrinsic_vwaddu_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv4i16_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8( %0, @@ -588,7 +588,7 @@ define @intrinsic_vwaddu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv8i16_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8( %0, @@ -628,7 +628,7 @@ define @intrinsic_vwaddu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv16i16_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8( %0, @@ -668,7 +668,7 @@ define @intrinsic_vwaddu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv32i16_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8( %0, @@ -708,7 +708,7 @@ define @intrinsic_vwaddu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv1i32_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16( %0, @@ -748,7 +748,7 @@ define @intrinsic_vwaddu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv2i32_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16( %0, @@ -788,7 +788,7 @@ define @intrinsic_vwaddu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv4i32_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16( %0, @@ -828,7 +828,7 @@ define @intrinsic_vwaddu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { 
entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv8i32_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16( %0, @@ -868,7 +868,7 @@ define @intrinsic_vwaddu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv16i32_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vwaddu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv1i16_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vwaddu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv2i16_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vwaddu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv4i16_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vwaddu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv8i16_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vwaddu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv16i16_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vwaddu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv32i16_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vwaddu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv1i32_nxv1i16_nxv1i16 -; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vwaddu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv2i32_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vwaddu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv4i32_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vwaddu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv8i32_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vwaddu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv16i32_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vwaddu_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv1i64_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32( %0, @@ -508,7 +508,7 @@ define @intrinsic_vwaddu_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv2i64_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32( %0, @@ -548,7 +548,7 @@ define @intrinsic_vwaddu_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv4i64_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vwaddu_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv8i64_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32( %0, @@ 
-628,7 +628,7 @@ define @intrinsic_vwaddu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv1i16_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8( %0, @@ -668,7 +668,7 @@ define @intrinsic_vwaddu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv2i16_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8( %0, @@ -708,7 +708,7 @@ define @intrinsic_vwaddu_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv4i16_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8( %0, @@ -748,7 +748,7 @@ define @intrinsic_vwaddu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv8i16_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vwaddu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv16i16_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vwaddu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv32i16_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vwaddu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv1i32_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16( %0, @@ -908,7 +908,7 @@ define @intrinsic_vwaddu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv2i32_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16( %0, @@ -948,7 +948,7 @@ define @intrinsic_vwaddu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv4i32_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, 
v0.t %a = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16( %0, @@ -988,7 +988,7 @@ define @intrinsic_vwaddu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv8i32_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vwaddu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv16i32_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vwaddu_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv1i64_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vwaddu_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv2i64_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vwaddu_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv4i64_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vwaddu_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv8i64_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vwaddu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vwaddu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8( %0, @@ -108,7 
+108,7 @@ define @intrinsic_vwaddu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vwaddu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vwaddu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vwaddu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vwaddu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vwaddu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vwaddu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vwaddu.w_mask_wx_nxv1i16_nxv1i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv1i16_nxv1i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv1i16.i8( %0, @@ -508,7 +508,7 @@ define @intrinsic_vwaddu.w_mask_wx_nxv2i16_nxv2i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv2i16_nxv2i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv2i16.i8( %0, @@ -548,7 +548,7 @@ define @intrinsic_vwaddu.w_mask_wx_nxv4i16_nxv4i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv4i16_nxv4i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv4i16.i8( %0, @@ -588,7 +588,7 @@ define @intrinsic_vwaddu.w_mask_wx_nxv8i16_nxv8i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv8i16_nxv8i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv8i16.i8( %0, @@ -628,7 +628,7 @@ define @intrinsic_vwaddu.w_mask_wx_nxv16i16_nxv16i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv16i16_nxv16i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv16i16.i8( %0, @@ -668,7 +668,7 @@ define @intrinsic_vwaddu.w_mask_wx_nxv32i16_nxv32i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv32i16_nxv32i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv32i16.i8( %0, @@ -708,7 +708,7 @@ define @intrinsic_vwaddu.w_mask_wx_nxv1i32_nxv1i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv1i32_nxv1i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv1i32.i16( %0, @@ -748,7 +748,7 @@ define @intrinsic_vwaddu.w_mask_wx_nxv2i32_nxv2i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv2i32_nxv2i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv2i32.i16( %0, @@ -788,7 +788,7 @@ define @intrinsic_vwaddu.w_mask_wx_nxv4i32_nxv4i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vwaddu.w_mask_wx_nxv4i32_nxv4i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv4i32.i16( %0, @@ -828,7 +828,7 @@ define @intrinsic_vwaddu.w_mask_wx_nxv8i32_nxv8i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv8i32_nxv8i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv8i32.i16( %0, @@ -868,7 +868,7 @@ define @intrinsic_vwaddu.w_mask_wx_nxv16i32_nxv16i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv16i32_nxv16i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv16i32.i16( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vwaddu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vwaddu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vwaddu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vwaddu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vwaddu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8 -; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vwaddu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vwaddu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vwaddu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vwaddu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vwaddu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32( %0, @@ -508,7 +508,7 @@ define @intrinsic_vwaddu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32( %0, @@ -548,7 +548,7 @@ define @intrinsic_vwaddu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32( %0, @@ -588,7 +588,7 @@ define 
@intrinsic_vwaddu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vwaddu.w_mask_wx_nxv1i16_nxv1i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv1i16_nxv1i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv1i16.i8( %0, @@ -668,7 +668,7 @@ define @intrinsic_vwaddu.w_mask_wx_nxv2i16_nxv2i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv2i16_nxv2i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv2i16.i8( %0, @@ -708,7 +708,7 @@ define @intrinsic_vwaddu.w_mask_wx_nxv4i16_nxv4i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv4i16_nxv4i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv4i16.i8( %0, @@ -748,7 +748,7 @@ define @intrinsic_vwaddu.w_mask_wx_nxv8i16_nxv8i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv8i16_nxv8i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv8i16.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vwaddu.w_mask_wx_nxv16i16_nxv16i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv16i16_nxv16i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv16i16.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vwaddu.w_mask_wx_nxv32i16_nxv32i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv32i16_nxv32i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv32i16.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vwaddu.w_mask_wx_nxv1i32_nxv1i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv1i32_nxv1i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv1i32.i16( %0, @@ -908,7 +908,7 @@ define @intrinsic_vwaddu.w_mask_wx_nxv2i32_nxv2i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv2i32_nxv2i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = 
call @llvm.riscv.vwaddu.w.mask.nxv2i32.i16( %0, @@ -948,7 +948,7 @@ define @intrinsic_vwaddu.w_mask_wx_nxv4i32_nxv4i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv4i32_nxv4i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv4i32.i16( %0, @@ -988,7 +988,7 @@ define @intrinsic_vwaddu.w_mask_wx_nxv8i32_nxv8i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv8i32_nxv8i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv8i32.i16( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vwaddu.w_mask_wx_nxv16i32_nxv16i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv16i32_nxv16i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv16i32.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vwaddu.w_mask_wx_nxv1i64_nxv1i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv1i64_nxv1i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv1i64.i32( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vwaddu.w_mask_wx_nxv2i64_nxv2i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv2i64_nxv2i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv2i64.i32( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vwaddu.w_mask_wx_nxv4i64_nxv4i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv4i64_nxv4i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv4i64.i32( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vwaddu.w_mask_wx_nxv8i64_nxv8i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv8i64_nxv8i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwaddu.w.mask.nxv8i64.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -33,7 +33,7 @@ define @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -56,7 +56,7 @@ define @intrinsic_vwmacc_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -79,7 +79,7 @@ define @intrinsic_vwmacc_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -102,7 +102,7 @@ define @intrinsic_vwmacc_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -125,7 +125,7 @@ define @intrinsic_vwmacc_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -148,7 +148,7 @@ define @intrinsic_vwmacc_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v18, v19 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -171,7 +171,7 @@ define @intrinsic_vwmacc_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v18, v19, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -194,7 +194,7 @@ define @intrinsic_vwmacc_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v20, v22 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -217,7 +217,7 @@ define @intrinsic_vwmacc_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v20, v22, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -243,7 +243,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu ; CHECK-NEXT: vle8.v v28, (a1) ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e8,m4,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -269,7 +269,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu ; CHECK-NEXT: vle8.v v28, (a1) ; CHECK-NEXT: vle8.v v8, 
(a0) -; CHECK-NEXT: vsetvli a0, a2, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e8,m4,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v8, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -292,7 +292,7 @@ define @intrinsic_vwmacc_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -315,7 +315,7 @@ define @intrinsic_vwmacc_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -338,7 +338,7 @@ define @intrinsic_vwmacc_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -361,7 +361,7 @@ define @intrinsic_vwmacc_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -384,7 +384,7 @@ define @intrinsic_vwmacc_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v18, v19 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -407,7 +407,7 @@ define @intrinsic_vwmacc_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v18, v19, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -430,7 +430,7 @@ define @intrinsic_vwmacc_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v20, v22 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -453,7 +453,7 @@ define @intrinsic_vwmacc_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v20, v22, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -479,7 +479,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -505,7 +505,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, 
e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v8, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -528,7 +528,7 @@ define @intrinsic_vwmacc_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -551,7 +551,7 @@ define @intrinsic_vwmacc_mask_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -574,7 +574,7 @@ define @intrinsic_vwmacc_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -597,7 +597,7 @@ define @intrinsic_vwmacc_mask_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -620,7 +620,7 @@ define @intrinsic_vwmacc_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -643,7 +643,7 @@ define @intrinsic_vwmacc_mask_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -666,7 +666,7 @@ define @intrinsic_vwmacc_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -689,7 +689,7 @@ define @intrinsic_vwmacc_mask_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -712,7 +712,7 @@ define @intrinsic_vwmacc_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -735,7 +735,7 @@ define @intrinsic_vwmacc_mask_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, 
e8,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -760,7 +760,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu ; CHECK-NEXT: vle8.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e8,m4,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -785,7 +785,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu ; CHECK-NEXT: vle8.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e8,m4,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -808,7 +808,7 @@ define @intrinsic_vwmacc_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -831,7 +831,7 @@ define @intrinsic_vwmacc_mask_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -854,7 +854,7 @@ define @intrinsic_vwmacc_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -877,7 +877,7 @@ define @intrinsic_vwmacc_mask_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -900,7 +900,7 @@ define @intrinsic_vwmacc_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -923,7 +923,7 @@ define @intrinsic_vwmacc_mask_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -946,7 +946,7 @@ define @intrinsic_vwmacc_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -969,7 +969,7 @@ define @intrinsic_vwmacc_mask_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v20, 
v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -994,7 +994,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e16,m4,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1019,7 +1019,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e16,m4,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -33,7 +33,7 @@ define @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -56,7 +56,7 @@ define @intrinsic_vwmacc_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -79,7 +79,7 @@ define @intrinsic_vwmacc_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -102,7 +102,7 @@ define @intrinsic_vwmacc_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -125,7 +125,7 @@ define @intrinsic_vwmacc_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -148,7 +148,7 @@ define @intrinsic_vwmacc_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v18, v19 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -171,7 +171,7 @@ define @intrinsic_vwmacc_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v18, v19, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -194,7 +194,7 @@ define @intrinsic_vwmacc_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v20, v22 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -217,7 +217,7 @@ define @intrinsic_vwmacc_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v20, v22, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -243,7 +243,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu ; CHECK-NEXT: vle8.v v28, (a1) ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e8,m4,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -269,7 +269,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu ; CHECK-NEXT: vle8.v v28, (a1) ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e8,m4,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v8, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -292,7 +292,7 @@ define @intrinsic_vwmacc_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -315,7 +315,7 @@ define @intrinsic_vwmacc_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -338,7 +338,7 @@ define @intrinsic_vwmacc_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -361,7 +361,7 @@ define @intrinsic_vwmacc_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -384,7 +384,7 @@ define @intrinsic_vwmacc_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v18, v19 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -407,7 +407,7 @@ define @intrinsic_vwmacc_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, 
a0, e16,m1,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v18, v19, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -430,7 +430,7 @@ define @intrinsic_vwmacc_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v20, v22 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -453,7 +453,7 @@ define @intrinsic_vwmacc_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v20, v22, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -479,7 +479,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -505,7 +505,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v8, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -528,7 +528,7 @@ define @intrinsic_vwmacc_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -551,7 +551,7 @@ define @intrinsic_vwmacc_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -574,7 +574,7 @@ define @intrinsic_vwmacc_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v18, v19 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -597,7 +597,7 @@ define @intrinsic_vwmacc_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v18, v19, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -620,7 +620,7 @@ define @intrinsic_vwmacc_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v20, v22 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -643,7 +643,7 @@ define @intrinsic_vwmacc_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vwmacc.vv 
v16, v20, v22, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -669,7 +669,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a1) ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e32,m4,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -695,7 +695,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a1) ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e32,m4,tu,mu ; CHECK-NEXT: vwmacc.vv v16, v8, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -718,7 +718,7 @@ define @intrinsic_vwmacc_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -741,7 +741,7 @@ define @intrinsic_vwmacc_mask_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -764,7 +764,7 @@ define @intrinsic_vwmacc_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -787,7 +787,7 @@ define @intrinsic_vwmacc_mask_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -810,7 +810,7 @@ define @intrinsic_vwmacc_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -833,7 +833,7 @@ define @intrinsic_vwmacc_mask_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -856,7 +856,7 @@ define @intrinsic_vwmacc_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -879,7 +879,7 @@ define @intrinsic_vwmacc_mask_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -902,7 +902,7 @@ define @intrinsic_vwmacc_vx_nxv16i16_i8_nxv16i8( %0, 
i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -925,7 +925,7 @@ define @intrinsic_vwmacc_mask_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -950,7 +950,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu ; CHECK-NEXT: vle8.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e8,m4,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -975,7 +975,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu ; CHECK-NEXT: vle8.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e8,m4,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -998,7 +998,7 @@ define @intrinsic_vwmacc_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1021,7 +1021,7 @@ define @intrinsic_vwmacc_mask_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1044,7 +1044,7 @@ define @intrinsic_vwmacc_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1067,7 +1067,7 @@ define @intrinsic_vwmacc_mask_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1090,7 +1090,7 @@ define @intrinsic_vwmacc_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1113,7 +1113,7 @@ define @intrinsic_vwmacc_mask_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1136,7 +1136,7 @@ define @intrinsic_vwmacc_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i32_i16_nxv8i16: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1159,7 +1159,7 @@ define @intrinsic_vwmacc_mask_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1184,7 +1184,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e16,m4,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1209,7 +1209,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e16,m4,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1232,7 +1232,7 @@ define @intrinsic_vwmacc_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i64_i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1255,7 +1255,7 @@ define @intrinsic_vwmacc_mask_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i64_i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1278,7 +1278,7 @@ define @intrinsic_vwmacc_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i64_i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1301,7 +1301,7 @@ define @intrinsic_vwmacc_mask_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i64_i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1324,7 +1324,7 @@ define @intrinsic_vwmacc_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i64_i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1347,7 +1347,7 @@ define @intrinsic_vwmacc_mask_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i64_i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1372,7 +1372,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e32,m4,tu,mu ; CHECK-NEXT: 
vwmacc.vx v16, a0, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1397,7 +1397,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e32,m4,tu,mu ; CHECK-NEXT: vwmacc.vx v16, a0, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -33,7 +33,7 @@ define @intrinsic_vwmaccsu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -56,7 +56,7 @@ define @intrinsic_vwmaccsu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -79,7 +79,7 @@ define @intrinsic_vwmaccsu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -102,7 +102,7 @@ define @intrinsic_vwmaccsu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -125,7 +125,7 @@ define @intrinsic_vwmaccsu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -148,7 +148,7 @@ define @intrinsic_vwmaccsu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v18, v19 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -171,7 +171,7 @@ define @intrinsic_vwmaccsu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v18, v19, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -194,7 +194,7 @@ define 
@intrinsic_vwmaccsu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v20, v22 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -217,7 +217,7 @@ define @intrinsic_vwmaccsu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v20, v22, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -243,7 +243,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu ; CHECK-NEXT: vle8.v v28, (a1) ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e8,m4,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -269,7 +269,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu ; CHECK-NEXT: vle8.v v28, (a1) ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e8,m4,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v8, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -292,7 +292,7 @@ define @intrinsic_vwmaccsu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -315,7 +315,7 @@ define @intrinsic_vwmaccsu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -338,7 +338,7 @@ define @intrinsic_vwmaccsu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -361,7 +361,7 @@ define @intrinsic_vwmaccsu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -384,7 +384,7 @@ define @intrinsic_vwmaccsu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v18, v19 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -407,7 +407,7 @@ define @intrinsic_vwmaccsu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v18, v19, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -430,7 +430,7 @@ 
define @intrinsic_vwmaccsu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v20, v22 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -453,7 +453,7 @@ define @intrinsic_vwmaccsu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v20, v22, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -479,7 +479,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -505,7 +505,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v8, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -528,7 +528,7 @@ define @intrinsic_vwmaccsu_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -551,7 +551,7 @@ define @intrinsic_vwmaccsu_mask_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -574,7 +574,7 @@ define @intrinsic_vwmaccsu_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -597,7 +597,7 @@ define @intrinsic_vwmaccsu_mask_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -620,7 +620,7 @@ define @intrinsic_vwmaccsu_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -643,7 +643,7 @@ define @intrinsic_vwmaccsu_mask_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -666,7 +666,7 @@ define @intrinsic_vwmaccsu_vx_nxv8i16_i8_nxv8i8( %0, 
i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -689,7 +689,7 @@ define @intrinsic_vwmaccsu_mask_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -712,7 +712,7 @@ define @intrinsic_vwmaccsu_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -735,7 +735,7 @@ define @intrinsic_vwmaccsu_mask_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -760,7 +760,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu ; CHECK-NEXT: vle8.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e8,m4,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -785,7 +785,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu ; CHECK-NEXT: vle8.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e8,m4,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -808,7 +808,7 @@ define @intrinsic_vwmaccsu_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -831,7 +831,7 @@ define @intrinsic_vwmaccsu_mask_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -854,7 +854,7 @@ define @intrinsic_vwmaccsu_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -877,7 +877,7 @@ define @intrinsic_vwmaccsu_mask_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -900,7 +900,7 @@ define @intrinsic_vwmaccsu_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vwmaccsu_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -923,7 +923,7 @@ define @intrinsic_vwmaccsu_mask_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -946,7 +946,7 @@ define @intrinsic_vwmaccsu_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -969,7 +969,7 @@ define @intrinsic_vwmaccsu_mask_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -994,7 +994,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e16,m4,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1019,7 +1019,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e16,m4,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -33,7 +33,7 @@ define @intrinsic_vwmaccsu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -56,7 +56,7 @@ define @intrinsic_vwmaccsu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -79,7 +79,7 @@ define @intrinsic_vwmaccsu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v17, v18, 
v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -102,7 +102,7 @@ define @intrinsic_vwmaccsu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -125,7 +125,7 @@ define @intrinsic_vwmaccsu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -148,7 +148,7 @@ define @intrinsic_vwmaccsu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v18, v19 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -171,7 +171,7 @@ define @intrinsic_vwmaccsu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v18, v19, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -194,7 +194,7 @@ define @intrinsic_vwmaccsu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v20, v22 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -217,7 +217,7 @@ define @intrinsic_vwmaccsu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v20, v22, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -243,7 +243,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu ; CHECK-NEXT: vle8.v v28, (a1) ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e8,m4,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -269,7 +269,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu ; CHECK-NEXT: vle8.v v28, (a1) ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e8,m4,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v8, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -292,7 +292,7 @@ define @intrinsic_vwmaccsu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -315,7 +315,7 @@ define @intrinsic_vwmaccsu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v17, v18, v0.t ; CHECK-NEXT: 
jalr zero, 0(ra) entry: @@ -338,7 +338,7 @@ define @intrinsic_vwmaccsu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -361,7 +361,7 @@ define @intrinsic_vwmaccsu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -384,7 +384,7 @@ define @intrinsic_vwmaccsu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v18, v19 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -407,7 +407,7 @@ define @intrinsic_vwmaccsu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v18, v19, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -430,7 +430,7 @@ define @intrinsic_vwmaccsu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v20, v22 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -453,7 +453,7 @@ define @intrinsic_vwmaccsu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v20, v22, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -479,7 +479,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -505,7 +505,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v8, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -528,7 +528,7 @@ define @intrinsic_vwmaccsu_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -551,7 +551,7 @@ define @intrinsic_vwmaccsu_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v17, v18, v0.t 
; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -574,7 +574,7 @@ define @intrinsic_vwmaccsu_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v18, v19 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -597,7 +597,7 @@ define @intrinsic_vwmaccsu_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v18, v19, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -620,7 +620,7 @@ define @intrinsic_vwmaccsu_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v20, v22 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -643,7 +643,7 @@ define @intrinsic_vwmaccsu_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v20, v22, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -669,7 +669,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a1) ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e32,m4,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -695,7 +695,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a1) ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e32,m4,tu,mu ; CHECK-NEXT: vwmaccsu.vv v16, v8, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -718,7 +718,7 @@ define @intrinsic_vwmaccsu_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -741,7 +741,7 @@ define @intrinsic_vwmaccsu_mask_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -764,7 +764,7 @@ define @intrinsic_vwmaccsu_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -787,7 +787,7 @@ define @intrinsic_vwmaccsu_mask_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) 
entry: @@ -810,7 +810,7 @@ define @intrinsic_vwmaccsu_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -833,7 +833,7 @@ define @intrinsic_vwmaccsu_mask_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -856,7 +856,7 @@ define @intrinsic_vwmaccsu_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -879,7 +879,7 @@ define @intrinsic_vwmaccsu_mask_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -902,7 +902,7 @@ define @intrinsic_vwmaccsu_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -925,7 +925,7 @@ define @intrinsic_vwmaccsu_mask_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -950,7 +950,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu ; CHECK-NEXT: vle8.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e8,m4,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -975,7 +975,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu ; CHECK-NEXT: vle8.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e8,m4,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -998,7 +998,7 @@ define @intrinsic_vwmaccsu_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1021,7 +1021,7 @@ define @intrinsic_vwmaccsu_mask_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1044,7 +1044,7 @@ define 
@intrinsic_vwmaccsu_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1067,7 +1067,7 @@ define @intrinsic_vwmaccsu_mask_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1090,7 +1090,7 @@ define @intrinsic_vwmaccsu_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1113,7 +1113,7 @@ define @intrinsic_vwmaccsu_mask_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1136,7 +1136,7 @@ define @intrinsic_vwmaccsu_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1159,7 +1159,7 @@ define @intrinsic_vwmaccsu_mask_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1184,7 +1184,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e16,m4,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1209,7 +1209,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e16,m4,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1232,7 +1232,7 @@ define @intrinsic_vwmaccsu_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i64_i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1255,7 +1255,7 @@ define @intrinsic_vwmaccsu_mask_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i64_i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1278,7 +1278,7 @@ define 
@intrinsic_vwmaccsu_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i64_i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1301,7 +1301,7 @@ define @intrinsic_vwmaccsu_mask_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i64_i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1324,7 +1324,7 @@ define @intrinsic_vwmaccsu_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i64_i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1347,7 +1347,7 @@ define @intrinsic_vwmaccsu_mask_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i64_i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1372,7 +1372,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e32,m4,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1397,7 +1397,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e32,m4,tu,mu ; CHECK-NEXT: vwmaccsu.vx v16, a0, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -33,7 +33,7 @@ define @intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -56,7 +56,7 @@ define @intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -79,7 +79,7 @@ define @intrinsic_vwmaccu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu 
+; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -102,7 +102,7 @@ define @intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -125,7 +125,7 @@ define @intrinsic_vwmaccu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -148,7 +148,7 @@ define @intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v18, v19 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -171,7 +171,7 @@ define @intrinsic_vwmaccu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v18, v19, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -194,7 +194,7 @@ define @intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v20, v22 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -217,7 +217,7 @@ define @intrinsic_vwmaccu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v20, v22, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -243,7 +243,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu ; CHECK-NEXT: vle8.v v28, (a1) ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e8,m4,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -269,7 +269,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu ; CHECK-NEXT: vle8.v v28, (a1) ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e8,m4,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v8, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -292,7 +292,7 @@ define @intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -315,7 +315,7 @@ define @intrinsic_vwmaccu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; 
CHECK-NEXT: vwmaccu.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -338,7 +338,7 @@ define @intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -361,7 +361,7 @@ define @intrinsic_vwmaccu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -384,7 +384,7 @@ define @intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v18, v19 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -407,7 +407,7 @@ define @intrinsic_vwmaccu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v18, v19, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -430,7 +430,7 @@ define @intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v20, v22 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -453,7 +453,7 @@ define @intrinsic_vwmaccu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v20, v22, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -479,7 +479,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -505,7 +505,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v8, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -528,7 +528,7 @@ define @intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -551,7 +551,7 @@ define @intrinsic_vwmaccu_mask_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, 
v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -574,7 +574,7 @@ define @intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -597,7 +597,7 @@ define @intrinsic_vwmaccu_mask_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -620,7 +620,7 @@ define @intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -643,7 +643,7 @@ define @intrinsic_vwmaccu_mask_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -666,7 +666,7 @@ define @intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -689,7 +689,7 @@ define @intrinsic_vwmaccu_mask_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -712,7 +712,7 @@ define @intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -735,7 +735,7 @@ define @intrinsic_vwmaccu_mask_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -760,7 +760,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu ; CHECK-NEXT: vle8.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e8,m4,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -785,7 +785,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu ; CHECK-NEXT: vle8.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e8,m4,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -808,7 +808,7 @@ define 
@intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -831,7 +831,7 @@ define @intrinsic_vwmaccu_mask_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -854,7 +854,7 @@ define @intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -877,7 +877,7 @@ define @intrinsic_vwmaccu_mask_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -900,7 +900,7 @@ define @intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -923,7 +923,7 @@ define @intrinsic_vwmaccu_mask_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -946,7 +946,7 @@ define @intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -969,7 +969,7 @@ define @intrinsic_vwmaccu_mask_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -994,7 +994,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e16,m4,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1019,7 +1019,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e16,m4,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll 
b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -33,7 +33,7 @@ define @intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -56,7 +56,7 @@ define @intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -79,7 +79,7 @@ define @intrinsic_vwmaccu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -102,7 +102,7 @@ define @intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -125,7 +125,7 @@ define @intrinsic_vwmaccu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -148,7 +148,7 @@ define @intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v18, v19 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -171,7 +171,7 @@ define @intrinsic_vwmaccu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v18, v19, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -194,7 +194,7 @@ define @intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v20, v22 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -217,7 +217,7 @@ define @intrinsic_vwmaccu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwmaccu_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v20, v22, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -243,7 +243,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu ; CHECK-NEXT: vle8.v v28, (a1) ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e8,m4,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -269,7 +269,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu ; CHECK-NEXT: vle8.v v28, (a1) ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e8,m4,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v8, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -292,7 +292,7 @@ define @intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -315,7 +315,7 @@ define @intrinsic_vwmaccu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -338,7 +338,7 @@ define @intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -361,7 +361,7 @@ define @intrinsic_vwmaccu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -384,7 +384,7 @@ define @intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v18, v19 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -407,7 +407,7 @@ define @intrinsic_vwmaccu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v18, v19, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -430,7 +430,7 @@ define @intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v20, v22 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -453,7 +453,7 @@ define @intrinsic_vwmaccu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwmaccu_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v20, v22, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -479,7 +479,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -505,7 +505,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e16,m4,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v8, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -528,7 +528,7 @@ define @intrinsic_vwmaccu_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v17, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -551,7 +551,7 @@ define @intrinsic_vwmaccu_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v17, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -574,7 +574,7 @@ define @intrinsic_vwmaccu_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v18, v19 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -597,7 +597,7 @@ define @intrinsic_vwmaccu_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v18, v19, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -620,7 +620,7 @@ define @intrinsic_vwmaccu_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v20, v22 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -643,7 +643,7 @@ define @intrinsic_vwmaccu_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v20, v22, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -669,7 +669,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a1) ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, e32,m4,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -695,7 +695,7 @@ ; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a1) ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a0, a2, 
e32,m4,tu,mu ; CHECK-NEXT: vwmaccu.vv v16, v8, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -718,7 +718,7 @@ define @intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -741,7 +741,7 @@ define @intrinsic_vwmaccu_mask_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -764,7 +764,7 @@ define @intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -787,7 +787,7 @@ define @intrinsic_vwmaccu_mask_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -810,7 +810,7 @@ define @intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -833,7 +833,7 @@ define @intrinsic_vwmaccu_mask_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -856,7 +856,7 @@ define @intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -879,7 +879,7 @@ define @intrinsic_vwmaccu_mask_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -902,7 +902,7 @@ define @intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -925,7 +925,7 @@ define @intrinsic_vwmaccu_mask_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; 
CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -950,7 +950,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu ; CHECK-NEXT: vle8.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e8,m4,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -975,7 +975,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu ; CHECK-NEXT: vle8.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e8,m4,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -998,7 +998,7 @@ define @intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1021,7 +1021,7 @@ define @intrinsic_vwmaccu_mask_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1044,7 +1044,7 @@ define @intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1067,7 +1067,7 @@ define @intrinsic_vwmaccu_mask_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1090,7 +1090,7 @@ define @intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1113,7 +1113,7 @@ define @intrinsic_vwmaccu_mask_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1136,7 +1136,7 @@ define @intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1159,7 +1159,7 @@ define @intrinsic_vwmaccu_mask_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: 
vwmaccu.vx v16, a0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1184,7 +1184,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e16,m4,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1209,7 +1209,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e16,m4,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1232,7 +1232,7 @@ define @intrinsic_vwmaccu_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i64_i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1255,7 +1255,7 @@ define @intrinsic_vwmaccu_mask_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i64_i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1278,7 +1278,7 @@ define @intrinsic_vwmaccu_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i64_i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1301,7 +1301,7 @@ define @intrinsic_vwmaccu_mask_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i64_i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1324,7 +1324,7 @@ define @intrinsic_vwmaccu_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i64_i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1347,7 +1347,7 @@ define @intrinsic_vwmaccu_mask_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i64_i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1372,7 +1372,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e32,m4,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -1397,7 +1397,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e32,m4,tu,mu ; CHECK-NEXT: vwmaccu.vx v16, a0, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -33,7 +33,7 @@ define @intrinsic_vwmaccus_mask_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -56,7 +56,7 @@ define @intrinsic_vwmaccus_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -79,7 +79,7 @@ define @intrinsic_vwmaccus_mask_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -102,7 +102,7 @@ define @intrinsic_vwmaccus_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -125,7 +125,7 @@ define @intrinsic_vwmaccus_mask_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -148,7 +148,7 @@ define @intrinsic_vwmaccus_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -171,7 +171,7 @@ define @intrinsic_vwmaccus_mask_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -194,7 +194,7 @@ define @intrinsic_vwmaccus_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -217,7 +217,7 @@ define @intrinsic_vwmaccus_mask_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -242,7 +242,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu ; CHECK-NEXT: vle8.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e8,m4,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -267,7 +267,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu ; CHECK-NEXT: vle8.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e8,m4,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -290,7 +290,7 @@ define @intrinsic_vwmaccus_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -313,7 +313,7 @@ define @intrinsic_vwmaccus_mask_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -336,7 +336,7 @@ define @intrinsic_vwmaccus_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -359,7 +359,7 @@ define @intrinsic_vwmaccus_mask_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -382,7 +382,7 @@ define @intrinsic_vwmaccus_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -405,7 +405,7 @@ define @intrinsic_vwmaccus_mask_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -428,7 +428,7 @@ define @intrinsic_vwmaccus_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -451,7 +451,7 @@ define @intrinsic_vwmaccus_mask_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli 
a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -476,7 +476,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e16,m4,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -501,7 +501,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e16,m4,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -33,7 +33,7 @@ define @intrinsic_vwmaccus_mask_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -56,7 +56,7 @@ define @intrinsic_vwmaccus_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -79,7 +79,7 @@ define @intrinsic_vwmaccus_mask_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -102,7 +102,7 @@ define @intrinsic_vwmaccus_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -125,7 +125,7 @@ define @intrinsic_vwmaccus_mask_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -148,7 +148,7 @@ define @intrinsic_vwmaccus_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -171,7 +171,7 @@ define @intrinsic_vwmaccus_mask_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwmaccus_mask_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -194,7 +194,7 @@ define @intrinsic_vwmaccus_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -217,7 +217,7 @@ define @intrinsic_vwmaccus_mask_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -242,7 +242,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu ; CHECK-NEXT: vle8.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e8,m4,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -267,7 +267,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu ; CHECK-NEXT: vle8.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e8,m4,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -290,7 +290,7 @@ define @intrinsic_vwmaccus_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -313,7 +313,7 @@ define @intrinsic_vwmaccus_mask_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -336,7 +336,7 @@ define @intrinsic_vwmaccus_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -359,7 +359,7 @@ define @intrinsic_vwmaccus_mask_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -382,7 +382,7 @@ define @intrinsic_vwmaccus_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -405,7 +405,7 @@ define @intrinsic_vwmaccus_mask_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i32_i16_nxv4i16: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -428,7 +428,7 @@ define @intrinsic_vwmaccus_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -451,7 +451,7 @@ define @intrinsic_vwmaccus_mask_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -476,7 +476,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e16,m4,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -501,7 +501,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e16,m4,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -524,7 +524,7 @@ define @intrinsic_vwmaccus_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i64_i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v17 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -547,7 +547,7 @@ define @intrinsic_vwmaccus_mask_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i64_i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v17, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -570,7 +570,7 @@ define @intrinsic_vwmaccus_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i64_i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v18 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -593,7 +593,7 @@ define @intrinsic_vwmaccus_mask_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i64_i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v18, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -616,7 +616,7 @@ define @intrinsic_vwmaccus_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i64_i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v20 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -639,7 +639,7 @@ define @intrinsic_vwmaccus_mask_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i64_i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v20, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -664,7 +664,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e32,m4,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: @@ -689,7 +689,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a1) -; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu +; CHECK-NEXT: vsetvli a1, a2, e32,m4,tu,mu ; CHECK-NEXT: vwmaccus.vx v16, a0, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vwmul_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i16_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vwmul_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i16_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vwmul_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i16_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vwmul_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv16i16_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vwmul_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv32i16_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vwmul_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vwmul_mask_vv_nxv1i32_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vwmul_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i32_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vwmul_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i32_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vwmul_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i32_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vwmul_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv16i32_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vwmul_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i16_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8( %0, @@ -508,7 +508,7 @@ define @intrinsic_vwmul_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i16_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8( %0, @@ -548,7 +548,7 @@ define @intrinsic_vwmul_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i16_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8( %0, @@ -588,7 +588,7 @@ define @intrinsic_vwmul_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i16_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8( %0, @@ -628,7 +628,7 @@ define 
@intrinsic_vwmul_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv16i16_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8( %0, @@ -668,7 +668,7 @@ define @intrinsic_vwmul_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv32i16_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8( %0, @@ -708,7 +708,7 @@ define @intrinsic_vwmul_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i32_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16( %0, @@ -748,7 +748,7 @@ define @intrinsic_vwmul_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i32_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16( %0, @@ -788,7 +788,7 @@ define @intrinsic_vwmul_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i32_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16( %0, @@ -828,7 +828,7 @@ define @intrinsic_vwmul_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i32_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16( %0, @@ -868,7 +868,7 @@ define @intrinsic_vwmul_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv16i32_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vwmul_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vwmul_mask_vv_nxv2i16_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vwmul_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i16_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vwmul_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i16_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vwmul_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv16i16_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vwmul_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv32i16_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vwmul_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i32_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vwmul_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i32_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vwmul_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i32_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vwmul_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i32_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16( %0, @@ -428,7 
+428,7 @@ define @intrinsic_vwmul_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv16i32_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vwmul_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i64_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32( %0, @@ -508,7 +508,7 @@ define @intrinsic_vwmul_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i64_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32( %0, @@ -548,7 +548,7 @@ define @intrinsic_vwmul_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i64_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vwmul_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i64_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vwmul_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i16_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8( %0, @@ -668,7 +668,7 @@ define @intrinsic_vwmul_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i16_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8( %0, @@ -708,7 +708,7 @@ define @intrinsic_vwmul_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i16_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8( %0, @@ -748,7 +748,7 @@ define @intrinsic_vwmul_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i16_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, 
{{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vwmul_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv16i16_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vwmul_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv32i16_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vwmul_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i32_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16( %0, @@ -908,7 +908,7 @@ define @intrinsic_vwmul_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i32_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16( %0, @@ -948,7 +948,7 @@ define @intrinsic_vwmul_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i32_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16( %0, @@ -988,7 +988,7 @@ define @intrinsic_vwmul_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i32_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vwmul_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv16i32_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vwmul_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i64_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vwmul_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i64_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vwmul_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i64_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vwmul_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i64_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vwmulsu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i16_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vwmulsu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i16_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vwmulsu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i16_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vwmulsu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i16_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vwmulsu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv16i16_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vwmulsu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv32i16_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwmulsu.vv {{v[0-9]+}}, 
{{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vwmulsu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i32_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vwmulsu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i32_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vwmulsu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i32_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vwmulsu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i32_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vwmulsu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv16i32_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vwmulsu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i16_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8( %0, @@ -508,7 +508,7 @@ define @intrinsic_vwmulsu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i16_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8( %0, @@ -548,7 +548,7 @@ define @intrinsic_vwmulsu_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i16_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8( %0, @@ -588,7 +588,7 @@ define @intrinsic_vwmulsu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vwmulsu_mask_vx_nxv8i16_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8( %0, @@ -628,7 +628,7 @@ define @intrinsic_vwmulsu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv16i16_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8( %0, @@ -668,7 +668,7 @@ define @intrinsic_vwmulsu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv32i16_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8( %0, @@ -708,7 +708,7 @@ define @intrinsic_vwmulsu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i32_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16( %0, @@ -748,7 +748,7 @@ define @intrinsic_vwmulsu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i32_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16( %0, @@ -788,7 +788,7 @@ define @intrinsic_vwmulsu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i32_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16( %0, @@ -828,7 +828,7 @@ define @intrinsic_vwmulsu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i32_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16( %0, @@ -868,7 +868,7 @@ define @intrinsic_vwmulsu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv16i32_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vwmulsu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i16_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vwmulsu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i16_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vwmulsu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i16_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vwmulsu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i16_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vwmulsu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv16i16_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vwmulsu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv32i16_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vwmulsu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i32_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vwmulsu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i32_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vwmulsu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i32_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16( %0, @@ -388,7 +388,7 @@ 
define @intrinsic_vwmulsu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i32_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vwmulsu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv16i32_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vwmulsu_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i64_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32( %0, @@ -508,7 +508,7 @@ define @intrinsic_vwmulsu_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i64_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32( %0, @@ -548,7 +548,7 @@ define @intrinsic_vwmulsu_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i64_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vwmulsu_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i64_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vwmulsu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i16_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8( %0, @@ -668,7 +668,7 @@ define @intrinsic_vwmulsu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i16_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8( %0, @@ -708,7 +708,7 @@ define @intrinsic_vwmulsu_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i16_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8( %0, @@ -748,7 +748,7 @@ define @intrinsic_vwmulsu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i16_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vwmulsu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv16i16_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vwmulsu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv32i16_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vwmulsu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i32_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16( %0, @@ -908,7 +908,7 @@ define @intrinsic_vwmulsu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i32_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16( %0, @@ -948,7 +948,7 @@ define @intrinsic_vwmulsu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i32_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16( %0, @@ -988,7 +988,7 @@ define @intrinsic_vwmulsu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i32_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vwmulsu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv16i32_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vwmulsu_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { 
entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i64_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vwmulsu_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i64_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vwmulsu_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i64_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vwmulsu_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i64_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vwmulu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i16_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vwmulu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i16_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vwmulu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i16_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vwmulu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i16_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vwmulu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv16i16_nxv16i8_nxv16i8 -; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vwmulu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv32i16_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vwmulu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i32_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vwmulu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i32_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vwmulu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i32_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vwmulu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i32_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vwmulu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv16i32_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vwmulu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i16_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8( %0, @@ -508,7 +508,7 @@ define @intrinsic_vwmulu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i16_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8( %0, @@ -548,7 +548,7 @@ define 
@intrinsic_vwmulu_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i16_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8( %0, @@ -588,7 +588,7 @@ define @intrinsic_vwmulu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i16_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8( %0, @@ -628,7 +628,7 @@ define @intrinsic_vwmulu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv16i16_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8( %0, @@ -668,7 +668,7 @@ define @intrinsic_vwmulu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv32i16_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8( %0, @@ -708,7 +708,7 @@ define @intrinsic_vwmulu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i32_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16( %0, @@ -748,7 +748,7 @@ define @intrinsic_vwmulu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i32_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16( %0, @@ -788,7 +788,7 @@ define @intrinsic_vwmulu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i32_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16( %0, @@ -828,7 +828,7 @@ define @intrinsic_vwmulu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i32_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16( %0, @@ -868,7 +868,7 @@ define @intrinsic_vwmulu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv16i32_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t 
%a = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vwmulu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i16_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vwmulu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i16_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vwmulu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i16_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vwmulu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i16_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vwmulu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv16i16_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vwmulu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv32i16_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vwmulu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i32_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vwmulu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i32_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16( %0, @@ 
-348,7 +348,7 @@ define @intrinsic_vwmulu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i32_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vwmulu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i32_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vwmulu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv16i32_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vwmulu_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i64_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32( %0, @@ -508,7 +508,7 @@ define @intrinsic_vwmulu_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i64_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32( %0, @@ -548,7 +548,7 @@ define @intrinsic_vwmulu_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i64_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vwmulu_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i64_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vwmulu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i16_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8( %0, @@ -668,7 +668,7 @@ define @intrinsic_vwmulu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i16_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8( %0, @@ -708,7 +708,7 @@ define @intrinsic_vwmulu_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i16_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8( %0, @@ -748,7 +748,7 @@ define @intrinsic_vwmulu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i16_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vwmulu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv16i16_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vwmulu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv32i16_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vwmulu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i32_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16( %0, @@ -908,7 +908,7 @@ define @intrinsic_vwmulu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i32_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16( %0, @@ -948,7 +948,7 @@ define @intrinsic_vwmulu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i32_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16( %0, @@ -988,7 +988,7 @@ define @intrinsic_vwmulu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i32_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vwmulu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vwmulu_mask_vx_nxv16i32_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vwmulu_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i64_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vwmulu_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i64_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vwmulu_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i64_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vwmulu_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i64_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vwsub_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i16_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vwsub_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i16_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vwsub_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i16_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vwsub_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i16_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vwsub_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv16i16_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vwsub_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv32i16_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vwsub_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i32_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vwsub_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i32_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vwsub_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i32_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vwsub_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i32_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vwsub_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv16i32_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vwsub_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i16_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8( %0, @@ -508,7 +508,7 @@ define @intrinsic_vwsub_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { 
entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i16_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8( %0, @@ -548,7 +548,7 @@ define @intrinsic_vwsub_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i16_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8( %0, @@ -588,7 +588,7 @@ define @intrinsic_vwsub_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i16_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8( %0, @@ -628,7 +628,7 @@ define @intrinsic_vwsub_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv16i16_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8( %0, @@ -668,7 +668,7 @@ define @intrinsic_vwsub_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv32i16_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8( %0, @@ -708,7 +708,7 @@ define @intrinsic_vwsub_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i32_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16( %0, @@ -748,7 +748,7 @@ define @intrinsic_vwsub_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i32_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16( %0, @@ -788,7 +788,7 @@ define @intrinsic_vwsub_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i32_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16( %0, @@ -828,7 +828,7 @@ define @intrinsic_vwsub_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i32_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16( %0, @@ -868,7 +868,7 @@ define 
@intrinsic_vwsub_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv16i32_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vwsub_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i16_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vwsub_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i16_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vwsub_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i16_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vwsub_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i16_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vwsub_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv16i16_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vwsub_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv32i16_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vwsub_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i32_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vwsub_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vwsub_mask_vv_nxv2i32_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vwsub_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i32_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vwsub_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i32_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vwsub_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv16i32_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vwsub_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i64_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32( %0, @@ -508,7 +508,7 @@ define @intrinsic_vwsub_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i64_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32( %0, @@ -548,7 +548,7 @@ define @intrinsic_vwsub_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i64_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vwsub_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i64_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vwsub_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i16_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8( %0, 
@@ -668,7 +668,7 @@ define @intrinsic_vwsub_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i16_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8( %0, @@ -708,7 +708,7 @@ define @intrinsic_vwsub_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i16_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8( %0, @@ -748,7 +748,7 @@ define @intrinsic_vwsub_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i16_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vwsub_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv16i16_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vwsub_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv32i16_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vwsub_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i32_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16( %0, @@ -908,7 +908,7 @@ define @intrinsic_vwsub_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i32_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16( %0, @@ -948,7 +948,7 @@ define @intrinsic_vwsub_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i32_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16( %0, @@ -988,7 +988,7 @@ define @intrinsic_vwsub_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i32_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call 
@llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vwsub_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv16i32_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vwsub_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i64_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vwsub_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i64_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vwsub_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i64_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vwsub_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i64_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vwsub.w_mask_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i16_nxv1i16_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vwsub.w_mask_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i16_nxv2i16_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vwsub.w_mask_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i16_nxv4i16_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8( %0, @@ -148,7 +148,7 @@ define 
@intrinsic_vwsub.w_mask_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i16_nxv8i16_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vwsub.w_mask_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv16i16_nxv16i16_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vwsub.w_mask_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i32_nxv1i32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vwsub.w_mask_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i32_nxv2i32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vwsub.w_mask_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i32_nxv4i32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vwsub.w_mask_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i32_nxv8i32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vwsub.w_mask_wx_nxv1i16_nxv1i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i16_nxv1i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwsub.wx 
{{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv1i16.i8( %0, @@ -508,7 +508,7 @@ define @intrinsic_vwsub.w_mask_wx_nxv2i16_nxv2i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i16_nxv2i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv2i16.i8( %0, @@ -548,7 +548,7 @@ define @intrinsic_vwsub.w_mask_wx_nxv4i16_nxv4i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i16_nxv4i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv4i16.i8( %0, @@ -588,7 +588,7 @@ define @intrinsic_vwsub.w_mask_wx_nxv8i16_nxv8i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i16_nxv8i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv8i16.i8( %0, @@ -628,7 +628,7 @@ define @intrinsic_vwsub.w_mask_wx_nxv16i16_nxv16i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv16i16_nxv16i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv16i16.i8( %0, @@ -668,7 +668,7 @@ define @intrinsic_vwsub.w_mask_wx_nxv32i16_nxv32i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv32i16_nxv32i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv32i16.i8( %0, @@ -708,7 +708,7 @@ define @intrinsic_vwsub.w_mask_wx_nxv1i32_nxv1i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i32_nxv1i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv1i32.i16( %0, @@ -748,7 +748,7 @@ define @intrinsic_vwsub.w_mask_wx_nxv2i32_nxv2i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i32_nxv2i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv2i32.i16( %0, @@ -788,7 +788,7 @@ define @intrinsic_vwsub.w_mask_wx_nxv4i32_nxv4i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i32_nxv4i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv4i32.i16( %0, @@ -828,7 +828,7 @@ define @intrinsic_vwsub.w_mask_wx_nxv8i32_nxv8i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i32_nxv8i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv8i32.i16( %0, @@ -868,7 +868,7 @@ define @intrinsic_vwsub.w_mask_wx_nxv16i32_nxv16i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv16i32_nxv16i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv16i32.i16( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vwsub.w_mask_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i16_nxv1i16_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vwsub.w_mask_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i16_nxv2i16_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vwsub.w_mask_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i16_nxv4i16_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vwsub.w_mask_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i16_nxv8i16_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vwsub.w_mask_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv16i16_nxv16i16_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vwsub.w_mask_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i32_nxv1i32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call 
@llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vwsub.w_mask_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i32_nxv2i32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vwsub.w_mask_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i32_nxv4i32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vwsub.w_mask_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i32_nxv8i32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vwsub.w_mask_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i64_nxv1i64_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32( %0, @@ -508,7 +508,7 @@ define @intrinsic_vwsub.w_mask_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i64_nxv2i64_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32( %0, @@ -548,7 +548,7 @@ define @intrinsic_vwsub.w_mask_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i64_nxv4i64_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vwsub.w_mask_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i64_nxv8i64_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vwsub.w_mask_wx_nxv1i16_nxv1i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i16_nxv1i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu 
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv1i16.i8( %0, @@ -668,7 +668,7 @@ define @intrinsic_vwsub.w_mask_wx_nxv2i16_nxv2i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i16_nxv2i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv2i16.i8( %0, @@ -708,7 +708,7 @@ define @intrinsic_vwsub.w_mask_wx_nxv4i16_nxv4i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i16_nxv4i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv4i16.i8( %0, @@ -748,7 +748,7 @@ define @intrinsic_vwsub.w_mask_wx_nxv8i16_nxv8i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i16_nxv8i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv8i16.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vwsub.w_mask_wx_nxv16i16_nxv16i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv16i16_nxv16i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv16i16.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vwsub.w_mask_wx_nxv32i16_nxv32i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv32i16_nxv32i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv32i16.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vwsub.w_mask_wx_nxv1i32_nxv1i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i32_nxv1i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv1i32.i16( %0, @@ -908,7 +908,7 @@ define @intrinsic_vwsub.w_mask_wx_nxv2i32_nxv2i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i32_nxv2i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv2i32.i16( %0, @@ -948,7 +948,7 @@ define @intrinsic_vwsub.w_mask_wx_nxv4i32_nxv4i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i32_nxv4i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv4i32.i16( %0, @@ -988,7 +988,7 @@ define @intrinsic_vwsub.w_mask_wx_nxv8i32_nxv8i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i32_nxv8i32_i16 -; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv8i32.i16( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vwsub.w_mask_wx_nxv16i32_nxv16i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv16i32_nxv16i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv16i32.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vwsub.w_mask_wx_nxv1i64_nxv1i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i64_nxv1i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv1i64.i32( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vwsub.w_mask_wx_nxv2i64_nxv2i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i64_nxv2i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv2i64.i32( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vwsub.w_mask_wx_nxv4i64_nxv4i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i64_nxv4i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv4i64.i32( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vwsub.w_mask_wx_nxv8i64_nxv8i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i64_nxv8i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsub.w.mask.nxv8i64.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vwsubu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv1i16_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vwsubu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv2i16_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vwsubu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv4i16_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwsubu.vv {{v[0-9]+}}, 
{{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vwsubu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv8i16_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vwsubu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv16i16_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vwsubu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv32i16_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vwsubu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv1i32_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vwsubu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv2i32_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vwsubu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv4i32_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vwsubu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv8i32_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vwsubu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv16i32_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vwsubu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vwsubu_mask_vx_nxv1i16_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8( %0, @@ -508,7 +508,7 @@ define @intrinsic_vwsubu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv2i16_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8( %0, @@ -548,7 +548,7 @@ define @intrinsic_vwsubu_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv4i16_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8( %0, @@ -588,7 +588,7 @@ define @intrinsic_vwsubu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv8i16_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8( %0, @@ -628,7 +628,7 @@ define @intrinsic_vwsubu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv16i16_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8( %0, @@ -668,7 +668,7 @@ define @intrinsic_vwsubu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv32i16_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8( %0, @@ -708,7 +708,7 @@ define @intrinsic_vwsubu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv1i32_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16( %0, @@ -748,7 +748,7 @@ define @intrinsic_vwsubu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv2i32_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16( %0, @@ -788,7 +788,7 @@ define @intrinsic_vwsubu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv4i32_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16( %0, @@ -828,7 +828,7 @@ define 
@intrinsic_vwsubu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv8i32_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16( %0, @@ -868,7 +868,7 @@ define @intrinsic_vwsubu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv16i32_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vwsubu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv1i16_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vwsubu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv2i16_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vwsubu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv4i16_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vwsubu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv8i16_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vwsubu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv16i16_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vwsubu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv32i16_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vwsubu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) 
nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv1i32_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vwsubu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv2i32_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vwsubu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv4i32_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vwsubu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv8i32_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vwsubu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv16i32_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vwsubu_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv1i64_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32( %0, @@ -508,7 +508,7 @@ define @intrinsic_vwsubu_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv2i64_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32( %0, @@ -548,7 +548,7 @@ define @intrinsic_vwsubu_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv4i64_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vwsubu_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv8i64_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vwsubu.vv {{v[0-9]+}}, 
{{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vwsubu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv1i16_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8( %0, @@ -668,7 +668,7 @@ define @intrinsic_vwsubu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv2i16_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8( %0, @@ -708,7 +708,7 @@ define @intrinsic_vwsubu_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv4i16_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8( %0, @@ -748,7 +748,7 @@ define @intrinsic_vwsubu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv8i16_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vwsubu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv16i16_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vwsubu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv32i16_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vwsubu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv1i32_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16( %0, @@ -908,7 +908,7 @@ define @intrinsic_vwsubu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv2i32_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16( %0, @@ -948,7 +948,7 @@ define @intrinsic_vwsubu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv4i32_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16( %0, @@ -988,7 +988,7 @@ define @intrinsic_vwsubu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv8i32_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vwsubu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv16i32_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vwsubu_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv1i64_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vwsubu_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv2i64_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vwsubu_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv4i64_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vwsubu_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv8i64_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vwsubu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vwsubu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwsubu.wv {{v[0-9]+}}, 
{{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vwsubu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vwsubu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vwsubu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vwsubu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vwsubu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vwsubu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vwsubu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: 
intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vwsubu.w_mask_wx_nxv1i16_nxv1i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i16_nxv1i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv1i16.i8( %0, @@ -508,7 +508,7 @@ define @intrinsic_vwsubu.w_mask_wx_nxv2i16_nxv2i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i16_nxv2i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv2i16.i8( %0, @@ -548,7 +548,7 @@ define @intrinsic_vwsubu.w_mask_wx_nxv4i16_nxv4i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i16_nxv4i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv4i16.i8( %0, @@ -588,7 +588,7 @@ define @intrinsic_vwsubu.w_mask_wx_nxv8i16_nxv8i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i16_nxv8i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv8i16.i8( %0, @@ -628,7 +628,7 @@ define @intrinsic_vwsubu.w_mask_wx_nxv16i16_nxv16i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv16i16_nxv16i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv16i16.i8( %0, @@ -668,7 +668,7 @@ define @intrinsic_vwsubu.w_mask_wx_nxv32i16_nxv32i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv32i16_nxv32i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv32i16.i8( %0, @@ -708,7 +708,7 @@ define @intrinsic_vwsubu.w_mask_wx_nxv1i32_nxv1i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i32_nxv1i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv1i32.i16( %0, @@ -748,7 +748,7 @@ define @intrinsic_vwsubu.w_mask_wx_nxv2i32_nxv2i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i32_nxv2i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv2i32.i16( %0, @@ -788,7 +788,7 @@ define 
@intrinsic_vwsubu.w_mask_wx_nxv4i32_nxv4i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i32_nxv4i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv4i32.i16( %0, @@ -828,7 +828,7 @@ define @intrinsic_vwsubu.w_mask_wx_nxv8i32_nxv8i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i32_nxv8i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv8i32.i16( %0, @@ -868,7 +868,7 @@ define @intrinsic_vwsubu.w_mask_wx_nxv16i32_nxv16i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv16i32_nxv16i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv16i32.i16( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vwsubu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vwsubu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vwsubu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vwsubu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vwsubu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, i64 %4) 
nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vwsubu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16( %0, @@ -308,7 +308,7 @@ define @intrinsic_vwsubu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vwsubu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vwsubu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vwsubu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32( %0, @@ -508,7 +508,7 @@ define @intrinsic_vwsubu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32( %0, @@ -548,7 +548,7 @@ define @intrinsic_vwsubu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, 
{{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vwsubu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vwsubu.w_mask_wx_nxv1i16_nxv1i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i16_nxv1i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv1i16.i8( %0, @@ -668,7 +668,7 @@ define @intrinsic_vwsubu.w_mask_wx_nxv2i16_nxv2i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i16_nxv2i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv2i16.i8( %0, @@ -708,7 +708,7 @@ define @intrinsic_vwsubu.w_mask_wx_nxv4i16_nxv4i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i16_nxv4i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv4i16.i8( %0, @@ -748,7 +748,7 @@ define @intrinsic_vwsubu.w_mask_wx_nxv8i16_nxv8i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i16_nxv8i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv8i16.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vwsubu.w_mask_wx_nxv16i16_nxv16i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv16i16_nxv16i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv16i16.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vwsubu.w_mask_wx_nxv32i16_nxv32i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv32i16_nxv32i16_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv32i16.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vwsubu.w_mask_wx_nxv1i32_nxv1i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i32_nxv1i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv1i32.i16( %0, @@ -908,7 +908,7 @@ define @intrinsic_vwsubu.w_mask_wx_nxv2i32_nxv2i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i32_nxv2i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv2i32.i16( %0, @@ -948,7 +948,7 @@ define @intrinsic_vwsubu.w_mask_wx_nxv4i32_nxv4i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i32_nxv4i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv4i32.i16( %0, @@ -988,7 +988,7 @@ define @intrinsic_vwsubu.w_mask_wx_nxv8i32_nxv8i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i32_nxv8i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv8i32.i16( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vwsubu.w_mask_wx_nxv16i32_nxv16i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv16i32_nxv16i32_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv16i32.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vwsubu.w_mask_wx_nxv1i64_nxv1i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i64_nxv1i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv1i64.i32( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vwsubu.w_mask_wx_nxv2i64_nxv2i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i64_nxv2i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv2i64.i32( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vwsubu.w_mask_wx_nxv4i64_nxv4i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i64_nxv4i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv4i64.i32( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vwsubu.w_mask_wx_nxv8i64_nxv8i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i64_nxv8i64_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vwsubu.w.mask.nxv8i64.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll @@ -28,7 +28,7 @@ define @intrinsic_vxor_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call 
@llvm.riscv.vxor.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vxor_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vxor_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vxor_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vxor_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vxor_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vxor_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vxor_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vxor_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call 
@llvm.riscv.vxor.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vxor_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vxor_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vxor_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vxor_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vxor_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vxor_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vxor_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vxor.vx 
{{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv1i8.i8( %0, @@ -788,7 +788,7 @@ define @intrinsic_vxor_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv2i8.i8( %0, @@ -828,7 +828,7 @@ define @intrinsic_vxor_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv4i8.i8( %0, @@ -868,7 +868,7 @@ define @intrinsic_vxor_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv8i8.i8( %0, @@ -908,7 +908,7 @@ define @intrinsic_vxor_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv16i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vxor_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv32i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vxor_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv64i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vxor_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv1i16.i16( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vxor_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv2i16.i16( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vxor_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call 
@llvm.riscv.vxor.mask.nxv4i16.i16( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vxor_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv8i16.i16( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vxor_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv16i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vxor_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv32i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vxor_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv1i32.i32( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vxor_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv2i32.i32( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vxor_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv4i32.i32( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vxor_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv8i32.i32( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vxor_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv16i32.i32( %0, @@ -1456,7 +1456,7 @@ define @intrinsic_vxor_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv1i8.i8( 
%0, @@ -1484,7 +1484,7 @@ define @intrinsic_vxor_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv2i8.i8( %0, @@ -1512,7 +1512,7 @@ define @intrinsic_vxor_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv4i8.i8( %0, @@ -1540,7 +1540,7 @@ define @intrinsic_vxor_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv8i8.i8( %0, @@ -1568,7 +1568,7 @@ define @intrinsic_vxor_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv16i8.i8( %0, @@ -1596,7 +1596,7 @@ define @intrinsic_vxor_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv32i8.i8( %0, @@ -1624,7 +1624,7 @@ define @intrinsic_vxor_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv64i8.i8( %0, @@ -1652,7 +1652,7 @@ define @intrinsic_vxor_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv1i16.i16( %0, @@ -1680,7 +1680,7 @@ define @intrinsic_vxor_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv2i16.i16( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vxor_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv4i16.i16( %0, @@ -1736,7 +1736,7 @@ define @intrinsic_vxor_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu 
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv8i16.i16( %0, @@ -1764,7 +1764,7 @@ define @intrinsic_vxor_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv16i16.i16( %0, @@ -1792,7 +1792,7 @@ define @intrinsic_vxor_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv32i16.i16( %0, @@ -1820,7 +1820,7 @@ define @intrinsic_vxor_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv1i32.i32( %0, @@ -1848,7 +1848,7 @@ define @intrinsic_vxor_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv2i32.i32( %0, @@ -1876,7 +1876,7 @@ define @intrinsic_vxor_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv4i32.i32( %0, @@ -1904,7 +1904,7 @@ define @intrinsic_vxor_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv8i32.i32( %0, @@ -1932,7 +1932,7 @@ define @intrinsic_vxor_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i32 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv16i32.i32( %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll @@ -28,7 +28,7 @@ define @intrinsic_vxor_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8( %0, @@ -68,7 +68,7 @@ define @intrinsic_vxor_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8( %0, @@ -108,7 +108,7 @@ define @intrinsic_vxor_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8( %0, @@ -148,7 +148,7 @@ define @intrinsic_vxor_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8( %0, @@ -188,7 +188,7 @@ define @intrinsic_vxor_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8( %0, @@ -228,7 +228,7 @@ define @intrinsic_vxor_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8( %0, @@ -268,7 +268,7 @@ define @intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8( %0, @@ -308,7 +308,7 @@ define @intrinsic_vxor_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16( %0, @@ -348,7 +348,7 @@ define @intrinsic_vxor_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16( %0, @@ -388,7 +388,7 @@ define @intrinsic_vxor_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16( %0, @@ -428,7 +428,7 @@ define @intrinsic_vxor_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16( %0, @@ -468,7 +468,7 @@ define @intrinsic_vxor_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16( %0, @@ -508,7 +508,7 @@ define @intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16( %0, @@ -548,7 +548,7 @@ define @intrinsic_vxor_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32( %0, @@ -588,7 +588,7 @@ define @intrinsic_vxor_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32( %0, @@ -628,7 +628,7 @@ define @intrinsic_vxor_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32( %0, @@ -668,7 +668,7 @@ define @intrinsic_vxor_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32( %0, @@ -708,7 +708,7 @@ define @intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32( %0, @@ -748,7 +748,7 @@ define @intrinsic_vxor_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64( %0, @@ -788,7 +788,7 @@ define @intrinsic_vxor_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; 
CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64( %0, @@ -828,7 +828,7 @@ define @intrinsic_vxor_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64( %0, @@ -868,7 +868,7 @@ define @intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64( %0, @@ -908,7 +908,7 @@ define @intrinsic_vxor_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv1i8.i8( %0, @@ -948,7 +948,7 @@ define @intrinsic_vxor_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv2i8.i8( %0, @@ -988,7 +988,7 @@ define @intrinsic_vxor_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv4i8.i8( %0, @@ -1028,7 +1028,7 @@ define @intrinsic_vxor_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv8i8.i8( %0, @@ -1068,7 +1068,7 @@ define @intrinsic_vxor_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv16i8.i8( %0, @@ -1108,7 +1108,7 @@ define @intrinsic_vxor_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv32i8.i8( %0, @@ -1148,7 +1148,7 @@ define @intrinsic_vxor_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv64i8.i8( %0, @@ -1188,7 +1188,7 @@ define @intrinsic_vxor_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv1i16.i16( %0, @@ -1228,7 +1228,7 @@ define @intrinsic_vxor_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv2i16.i16( %0, @@ -1268,7 +1268,7 @@ define @intrinsic_vxor_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv4i16.i16( %0, @@ -1308,7 +1308,7 @@ define @intrinsic_vxor_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv8i16.i16( %0, @@ -1348,7 +1348,7 @@ define @intrinsic_vxor_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv16i16.i16( %0, @@ -1388,7 +1388,7 @@ define @intrinsic_vxor_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv32i16.i16( %0, @@ -1428,7 +1428,7 @@ define @intrinsic_vxor_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv1i32.i32( %0, @@ -1468,7 +1468,7 @@ define @intrinsic_vxor_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv2i32.i32( %0, @@ -1508,7 +1508,7 @@ define @intrinsic_vxor_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv4i32.i32( %0, @@ -1548,7 +1548,7 @@ define @intrinsic_vxor_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv8i32.i32( %0, @@ -1588,7 +1588,7 @@ define @intrinsic_vxor_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv16i32.i32( %0, @@ -1628,7 +1628,7 @@ define @intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv1i64.i64( %0, @@ -1668,7 +1668,7 @@ define @intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv2i64.i64( %0, @@ -1708,7 +1708,7 @@ define @intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv4i64.i64( %0, @@ -1748,7 +1748,7 @@ define @intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vxor.mask.nxv8i64.i64( %0, @@ -1776,7 +1776,7 @@ define @intrinsic_vxor_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv1i8.i8( %0, @@ -1804,7 +1804,7 @@ define @intrinsic_vxor_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv2i8.i8( %0, @@ -1832,7 +1832,7 @@ define @intrinsic_vxor_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu ; 
CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv4i8.i8( %0, @@ -1860,7 +1860,7 @@ define @intrinsic_vxor_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv8i8.i8( %0, @@ -1888,7 +1888,7 @@ define @intrinsic_vxor_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv16i8.i8( %0, @@ -1916,7 +1916,7 @@ define @intrinsic_vxor_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv32i8.i8( %0, @@ -1944,7 +1944,7 @@ define @intrinsic_vxor_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv64i8.i8( %0, @@ -1972,7 +1972,7 @@ define @intrinsic_vxor_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv1i16.i16( %0, @@ -2000,7 +2000,7 @@ define @intrinsic_vxor_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv2i16.i16( %0, @@ -2028,7 +2028,7 @@ define @intrinsic_vxor_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv4i16.i16( %0, @@ -2056,7 +2056,7 @@ define @intrinsic_vxor_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv8i16.i16( %0, @@ -2084,7 +2084,7 @@ define @intrinsic_vxor_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv16i16.i16( %0, @@ -2112,7 +2112,7 @@ define @intrinsic_vxor_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { 
entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv32i16.i16( %0, @@ -2140,7 +2140,7 @@ define @intrinsic_vxor_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv1i32.i32( %0, @@ -2168,7 +2168,7 @@ define @intrinsic_vxor_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv2i32.i32( %0, @@ -2196,7 +2196,7 @@ define @intrinsic_vxor_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv4i32.i32( %0, @@ -2224,7 +2224,7 @@ define @intrinsic_vxor_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv8i32.i32( %0, @@ -2252,7 +2252,7 @@ define @intrinsic_vxor_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv16i32.i32( %0, @@ -2280,7 +2280,7 @@ define @intrinsic_vxor_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv1i64.i64( %0, @@ -2308,7 +2308,7 @@ define @intrinsic_vxor_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv2i64.i64( %0, @@ -2336,7 +2336,7 @@ define @intrinsic_vxor_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu ; CHECK: vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv4i64.i64( %0, @@ -2364,7 +2364,7 @@ define @intrinsic_vxor_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i64 %3) nounwind { entry: ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu ; CHECK: 
vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vxor.mask.nxv8i64.i64( %0,
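For reference, a representative masked test with the scalable vector types spelled out (an illustrative sketch, not copied verbatim from vxor-rv32.ll) has the shape below; because the destination of the masked intrinsic is tied to the passthru operand %0, the inserted vsetvli is now expected to request tail-undisturbed (tu) rather than tail-agnostic (ta):

; Illustrative only: masked vxor test shape with the <vscale x ...> types
; written out. %0 is the passthru/merge operand the result is tied to,
; %3 is the mask, %4 is the VL operand (i32 on rv32).
declare <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>,
  <vscale x 1 x i1>, i32)

define <vscale x 1 x i8> @intrinsic_vxor_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i8_nxv1i8_nxv1i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
; CHECK: vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
  %a = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    i32 %4)
  ret <vscale x 1 x i8> %a
}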